From 82254e13e9c694810855b86a6b5dfbe4b8fd6bda Mon Sep 17 00:00:00 2001 From: Fernando Date: Tue, 26 Dec 2017 11:14:28 -0200 Subject: [PATCH 01/35] Bump USER_AGENT version 0.1.22 (dev) (#3521) --- medusa/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/common.py b/medusa/common.py index e0a4cf3d93..4ec73c4e10 100644 --- a/medusa/common.py +++ b/medusa/common.py @@ -46,7 +46,7 @@ # To enable, set SPOOF_USER_AGENT = True SPOOF_USER_AGENT = False INSTANCE_ID = str(uuid.uuid1()) -VERSION = '0.1.21' +VERSION = '0.1.22' USER_AGENT = u'Medusa/{version} ({system}; {release}; {instance})'.format( version=VERSION, system=platform.system(), release=platform.release(), instance=INSTANCE_ID) From da5c216bff132e1aac17b5592e9dbdc4972f4b0b Mon Sep 17 00:00:00 2001 From: Fernando Date: Tue, 26 Dec 2017 11:14:44 -0200 Subject: [PATCH 02/35] Add multiple language tags support (#3520) --- medusa/post_processor.py | 17 ++++++++++++++--- tests/test_rename_associated_file.py | 5 +++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/medusa/post_processor.py b/medusa/post_processor.py index 4550cb4a2c..56341ffce0 100644 --- a/medusa/post_processor.py +++ b/medusa/post_processor.py @@ -61,6 +61,19 @@ from six import text_type +# Most common language tags from IETF +# https://datahub.io/core/language-codes#resource-ietf-language-tags +LANGUAGE_TAGS = { + 'en-us': 'en-US', + 'en-gb': 'en-GB', + 'en-au': 'en-AU', + 'pt-br': 'pt-BR', + 'pt-pt': 'pt-PT', + 'es-mx': 'es-MX', + 'zh-cn': 'zh-CH', + 'zh-tw': 'zh-TW', +} + class PostProcessor(object): """A class which will process a media file according to the post processing settings in the config.""" @@ -373,9 +386,7 @@ def rename_associated_file(new_path, new_basename, filepath): sub_code = split_path[1] code = sub_code.lower().replace('_', '-') if from_code(code, unknown='') or from_ietf_code(code, unknown=''): - # TODO remove this hardcoded language - if code == 'pt-br': - code = 'pt-BR' + code = LANGUAGE_TAGS.get(code, code) new_extension = code + '.' + extension extension = sub_code + '.' + extension diff --git a/tests/test_rename_associated_file.py b/tests/test_rename_associated_file.py index 4af153dbb1..4143e54945 100644 --- a/tests/test_rename_associated_file.py +++ b/tests/test_rename_associated_file.py @@ -93,6 +93,11 @@ 'filepath': 'downloads/tv/Gomorra S03 E11 - x264 .srt', 'expected': 'media/shows/gomorra/season 3/Gomorra S03E15 Episode Name.srt' }, + { # p14: Subtitle with language tag + 'new_path': 'media/shows/riko or marty/season 3/', + 'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.en-au.srt', + 'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.en-AU.srt' + }, ]) def test_rename_associated_file(p, create_dir, monkeypatch): """Test rename_associated_file.""" From ecd4b38ba4c39b16dc196c5245bd8226ae5834a7 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Tue, 26 Dec 2017 12:25:18 -0500 Subject: [PATCH 03/35] Incorrect language code for `Chinese Simplified, People's Republic of China` (#3523) Language code for `Chinese Simplified, People's Republic of China` is `zh-CN` not `zh-CH`. 
--- medusa/post_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/post_processor.py b/medusa/post_processor.py index 56341ffce0..f6ee7ab1fe 100644 --- a/medusa/post_processor.py +++ b/medusa/post_processor.py @@ -70,7 +70,7 @@ 'pt-br': 'pt-BR', 'pt-pt': 'pt-PT', 'es-mx': 'es-MX', - 'zh-cn': 'zh-CH', + 'zh-cn': 'zh-CN', 'zh-tw': 'zh-TW', } From e4010c6da85d65055193da91622d7de13a0d342d Mon Sep 17 00:00:00 2001 From: Fernando Date: Wed, 27 Dec 2017 10:19:29 -0200 Subject: [PATCH 04/35] Bump USER_AGENT version 0.1.23 (dev) (#3530) --- medusa/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/common.py b/medusa/common.py index 4ec73c4e10..3db15cd1f6 100644 --- a/medusa/common.py +++ b/medusa/common.py @@ -46,7 +46,7 @@ # To enable, set SPOOF_USER_AGENT = True SPOOF_USER_AGENT = False INSTANCE_ID = str(uuid.uuid1()) -VERSION = '0.1.22' +VERSION = '0.1.23' USER_AGENT = u'Medusa/{version} ({system}; {release}; {instance})'.format( version=VERSION, system=platform.system(), release=platform.release(), instance=INSTANCE_ID) From 7d3f0678b9d827e358981ae6ebf8538b5750463b Mon Sep 17 00:00:00 2001 From: duramato Date: Wed, 27 Dec 2017 18:00:59 +0000 Subject: [PATCH 05/35] Update imdb-pie to 4.4.0 --- ext/imdbpie/imdbpie.py | 136 ++++++++++++++++++++--------------------- requirements.txt | 2 +- 2 files changed, 66 insertions(+), 72 deletions(-) diff --git a/ext/imdbpie/imdbpie.py b/ext/imdbpie/imdbpie.py index e5f18d8bc7..6b8a8f9db9 100644 --- a/ext/imdbpie/imdbpie.py +++ b/ext/imdbpie/imdbpie.py @@ -13,7 +13,7 @@ from cachecontrol.caches import FileCache from six.moves import html_parser from six.moves import http_client as httplib -from six.moves.urllib.parse import urlencode, quote +from six.moves.urllib.parse import urlencode, quote, quote_plus from imdbpie.objects import Image, Title, Person, Episode, Review from imdbpie.constants import ( @@ -27,7 +27,7 @@ class Imdb(object): def __init__(self, api_key=None, locale=None, anonymize=False, exclude_episodes=False, user_agent=None, cache=None, - proxy_uri=None, verify_ssl=True): + proxy_uri=None, verify_ssl=True, session=None): self.api_key = api_key or SHA1_KEY self.timestamp = time.mktime(datetime.date.today().timetuple()) self.user_agent = user_agent or random.choice(USER_AGENTS) @@ -37,16 +37,17 @@ def __init__(self, api_key=None, locale=None, anonymize=False, self.proxy_uri = proxy_uri or DEFAULT_PROXY_URI self.anonymize = anonymize self.verify_ssl = verify_ssl - self.session = requests + self.session = session or requests.Session() if self.caching_enabled: warnings.warn('caching will be removed in version 5.0.0 ' 'due to not being thread safe') self.session = CacheControl( - requests.Session(), cache=FileCache('.imdbpie_cache') + self.session, cache=FileCache('.imdbpie_cache') ) def get_person_by_id(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/name/maindetails', {'nconst': imdb_id}) response = self._get(url) @@ -57,6 +58,7 @@ def get_person_by_id(self, imdb_id): return person def get_title_by_id(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/title/maindetails', {'tconst': imdb_id}) response = self._get(url) @@ -77,6 +79,7 @@ def get_title_by_id(self, imdb_id): return title def get_title_plots(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/title/plot', {'tconst': imdb_id}) response = self._get(url) @@ -87,6 +90,7 @@ def get_title_plots(self, imdb_id): return [plot.get('text') for plot in plots] def 
title_exists(self, imdb_id): + self.validate_imdb_id(imdb_id) page_url = 'http://www.imdb.com/title/{0}/'.format(imdb_id) if self.anonymize is True: @@ -105,74 +109,39 @@ def title_exists(self, imdb_id): response.raise_for_status() def search_for_person(self, name): - search_params = { - 'json': '1', - 'nr': 1, - 'nn': 'on', - 'q': name - } - query_params = urlencode(search_params) - search_results = self._get( - 'http://www.imdb.com/xml/find?{0}'.format(query_params)) - - target_result_keys = ( - 'name_popular', 'name_exact', 'name_approx', 'name_substring') - person_results = [] - - html_unescaped = html_parser.HTMLParser().unescape - - # Loop through all search_results and build a list - # with popular matches first - for key in target_result_keys: - - if key not in search_results.keys(): + query = quote(name) + url = 'https://v2.sg.media-imdb.com/suggests/{0}/{1}.json'.format( + query[0].lower(), query + ) + search_results = self._get(url) + results = [] + for result in search_results.get('d', ()): + if not result['id'].startswith('nm'): + # ignore non-person results continue - - for result in search_results[key]: - result_item = { - 'name': html_unescaped(result['name']), - 'imdb_id': result['id'] - } - person_results.append(result_item) - return person_results + result_item = { + 'name': result['l'], + 'imdb_id': result['id'], + } + results.append(result_item) + return results def search_for_title(self, title): - default_search_for_title_params = { - 'json': '1', - 'nr': 1, - 'tt': 'on', - 'q': title - } - query_params = urlencode(default_search_for_title_params) - search_results = self._get( - 'http://www.imdb.com/xml/find?{0}'.format(query_params) + query = quote(title) + url = 'https://v2.sg.media-imdb.com/suggests/{0}/{1}.json'.format( + query[0].lower(), query ) - - target_result_keys = ( - 'title_popular', 'title_exact', 'title_approx', 'title_substring') - title_results = [] - - html_unescaped = html_parser.HTMLParser().unescape - - # Loop through all search_results and build a list - # with popular matches first - for key in target_result_keys: - - if key not in search_results.keys(): - continue - - for result in search_results[key]: - year_match = re.search(r'(\d{4})', result['title_description']) - year = year_match.group(0) if year_match else None - - result_item = { - 'title': html_unescaped(result['title']), - 'year': year, - 'imdb_id': result['id'] - } - title_results.append(result_item) - - return title_results + search_results = self._get(url) + results = [] + for result in search_results.get('d', ()): + result_item = { + 'title': result['l'], + 'year': str(result.get('y')) if result.get('y') else None, + 'imdb_id': result['id'], + 'type': result.get('q'), + } + results.append(result_item) + return results def top_250(self): url = self._build_url('/chart/top', {}) @@ -190,12 +159,14 @@ def popular_movies(self): return response['data']['list'] def get_title_images(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/title/photos', {'tconst': imdb_id}) response = self._get(url) return self._get_images(response) def get_title_reviews(self, imdb_id, max_results=None): """Retrieve reviews for a title ordered by 'Best' descending""" + self.validate_imdb_id(imdb_id) user_comments = self._get_reviews_data( imdb_id, max_results=max_results @@ -211,11 +182,13 @@ def get_title_reviews(self, imdb_id, max_results=None): return title_reviews def get_person_images(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/name/photos', {'nconst': 
imdb_id}) response = self._get(url) return self._get_images(response) def get_episodes(self, imdb_id): + self.validate_imdb_id(imdb_id) if self.exclude_episodes: raise ValueError('exclude_episodes is currently set') @@ -244,6 +217,7 @@ def get_episodes(self, imdb_id): return episodes def _get_credits_data(self, imdb_id): + self.validate_imdb_id(imdb_id) url = self._build_url('/title/fullcredits', {'tconst': imdb_id}) response = self._get(url) @@ -253,6 +227,7 @@ def _get_credits_data(self, imdb_id): return response.get('data').get('credits') def _get_reviews_data(self, imdb_id, max_results=None): + self.validate_imdb_id(imdb_id) params = {'tconst': imdb_id} if max_results: params['limit'] = max_results @@ -277,15 +252,34 @@ def _cache_response(file_path, resp): with open(file_path, 'w+') as f: json.dump(resp, f) + def _parse_dirty_json(self, data): + match_json_within_dirty_json = r'imdb\$[\w_]+\({1}(.+)\){1}' + data_clean = re.match( + match_json_within_dirty_json, data, re.IGNORECASE + ).groups()[0] + return json.loads(data_clean) + + @staticmethod + def validate_imdb_id(imdb_id): + match_id = r'[a-zA-Z]{2}[0-9]{7}' + try: + re.match(match_id, imdb_id, re.IGNORECASE).group() + except (AttributeError, TypeError): + raise ValueError('invalid imdb id') + def _get(self, url): resp = self.session.get( url, headers={'User-Agent': self.user_agent}, - verify=self.verify_ssl) + verify=self.verify_ssl + ) resp.raise_for_status() - - resp_dict = json.loads(resp.content.decode('utf-8')) + resp_data = resp.content.decode('utf-8') + try: + resp_dict = json.loads(resp_data) + except ValueError: + resp_dict = self._parse_dirty_json(resp_data) if resp_dict.get('error'): return None diff --git a/requirements.txt b/requirements.txt index 191cb1ab06..499564b131 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ git+https://github.com/kurtmckee/feedparser.git@f1dd1bb923ebfe6482fc2521c1f150b4 futures==3.1.1 guessit==2.1.4 html5lib==0.999999999 -imdbpie==4.3.0 +imdbpie==4.4.0 jsonrpclib==0.1.7 knowit==0.2.4 lockfile==0.12.2 From a7e70a20eec21e45b4e12df28694a8d8f4df5835 Mon Sep 17 00:00:00 2001 From: Dario Date: Wed, 27 Dec 2017 20:05:10 +0100 Subject: [PATCH 06/35] Fix freeleech option not being saved. 
Fixes #3524 (#3538) --- medusa/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/__main__.py b/medusa/__main__.py index 66d040fa24..086d673836 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -1535,7 +1535,7 @@ def save_config(): ], GenericProvider.TORRENT: [ 'custom_url', 'digest', 'hash', 'passkey', 'pin', 'confirmed', 'ranked', 'engrelease', 'onlyspasearch', - 'sorting', 'ratio', 'minseed', 'minleech', 'options', 'freelech', 'cat', 'subtitle', 'cookies', + 'sorting', 'ratio', 'minseed', 'minleech', 'options', 'freeleech', 'cat', 'subtitle', 'cookies', ], GenericProvider.NZB: [ 'cat_ids' From 74b3e4c3cb25197d9004885a72f4d6a4bee4cb14 Mon Sep 17 00:00:00 2001 From: Alexis Tyler Date: Thu, 28 Dec 2017 09:03:26 +1030 Subject: [PATCH 07/35] replace boobysteel docker hub link with pymedusa's one --- readme.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/readme.md b/readme.md index 8af2ea5c89..096330ca49 100644 --- a/readme.md +++ b/readme.md @@ -95,6 +95,4 @@ The [linuxserver.io](https://www.linuxserver.io) team have kindly produced docke * armhf - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa/), [Github](https://github.com/linuxserver/docker-medusa-armhf) * aarch64 - [Dockerhub](https://hub.docker.com/r/lsioarmhf/medusa-aarch64/), [Github](https://github.com/linuxserver/docker-medusa-arm64) -Now that the Dockerfile is native as of Dec 6 2017, there's also a direct build available here [Dockerhub](https://hub.docker.com/r/bobbysteel/medusa/), built directly from this repo on a daily basis. - - +There's also a direct build available here [Dockerhub](https://hub.docker.com/r/pymedusa/medusa/) which is updated directly from this repo on every commit. From 4620266d9d019b3ded049bc399faf763fdadf5aa Mon Sep 17 00:00:00 2001 From: h3llrais3r Date: Thu, 28 Dec 2017 17:02:31 +0100 Subject: [PATCH 08/35] Fix anime split show layout (#3544) --- views/partials/home/banner.mako | 6 ++++-- views/partials/home/poster.mako | 2 ++ views/partials/home/simple.mako | 6 ++++-- views/partials/home/small.mako | 10 +++++----- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/views/partials/home/banner.mako b/views/partials/home/banner.mako index 3227866c2d..1aa0f3e82a 100644 --- a/views/partials/home/banner.mako +++ b/views/partials/home/banner.mako @@ -13,8 +13,10 @@ % for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if cur_list_type == "Anime": -
-        Anime List
+    % if len(show_lists) > 1:
+    % if len(show_lists) > 1:
+        ${cur_list_type}
+ % endif % endif diff --git a/views/partials/home/poster.mako b/views/partials/home/poster.mako index d205de6df7..606cc86fab 100644 --- a/views/partials/home/poster.mako +++ b/views/partials/home/poster.mako @@ -16,6 +16,7 @@ <% my_show_list = list(cur_show_list[1]) %>
+    % if len(show_lists) > 1:
+    % endif
% for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: % if cur_loading_show.show is None: diff --git a/views/partials/home/simple.mako b/views/partials/home/simple.mako index 76376bac26..e4a784ab76 100644 --- a/views/partials/home/simple.mako +++ b/views/partials/home/simple.mako @@ -15,8 +15,10 @@ % for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if cur_list_type == "Anime": -
-        Anime List
+    % if len(show_lists) > 1:
+    % if len(show_lists) > 1:
+        ${cur_list_type}
+ % endif % endif
diff --git a/views/partials/home/small.mako b/views/partials/home/small.mako index 4d863e7de7..fcd84dfcf3 100644 --- a/views/partials/home/small.mako +++ b/views/partials/home/small.mako @@ -11,12 +11,12 @@ %> <%namespace file="/inc_defs.mako" import="renderQualityPill"/> % for cur_show_list in show_lists: - <% curListType = cur_show_list[0] %> + <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if curListType == "Anime": -
-        Anime List
+    % if len(show_lists) > 1:
+        ${cur_list_type}
% endif -
+
@@ -34,7 +34,7 @@ - + From dc7ff5cbfcb3caa65895d61e72ec725d786b32ff Mon Sep 17 00:00:00 2001 From: p0ps Date: Thu, 28 Dec 2017 17:46:24 +0100 Subject: [PATCH 09/35] Continue when there is no image in the sorted list. (#3545) --- medusa/indexers/indexer_base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/medusa/indexers/indexer_base.py b/medusa/indexers/indexer_base.py index 81334484c0..015c81cc34 100644 --- a/medusa/indexers/indexer_base.py +++ b/medusa/indexers/indexer_base.py @@ -257,6 +257,9 @@ def _save_images(self, series_id, images): } ) + if not images_by_rating: + continue + # Get the highest rated image highest_rated = images_by_rating[0] img_url = highest_rated['_bannerpath'] From 59bced5f8d0b50b97794d42a1b0d4745e849141b Mon Sep 17 00:00:00 2001 From: p0ps Date: Thu, 28 Dec 2017 19:00:18 +0100 Subject: [PATCH 10/35] Removed redundant if/then. (#3546) * Removed redundant if/then. * Indent like the rest --- views/partials/home/banner.mako | 4 +--- views/partials/home/poster.mako | 18 +++++++++--------- views/partials/home/simple.mako | 4 +--- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/views/partials/home/banner.mako b/views/partials/home/banner.mako index 1aa0f3e82a..8cf7fdc327 100644 --- a/views/partials/home/banner.mako +++ b/views/partials/home/banner.mako @@ -14,9 +14,7 @@ <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> % if len(show_lists) > 1: - % if len(show_lists) > 1: -
-        ${cur_list_type}
-    % endif
+        ${cur_list_type}
     % endif
     Next Ep
     Add ${('Show', 'Anime')[curListType == 'Anime']}
     Add ${('Show', 'Anime')[cur_list_type == 'Anime']}
diff --git a/views/partials/home/poster.mako b/views/partials/home/poster.mako index 606cc86fab..506849d02e 100644 --- a/views/partials/home/poster.mako +++ b/views/partials/home/poster.mako @@ -17,15 +17,15 @@
     % if len(show_lists) > 1:
-    <...>
-    <...>
-        ${cur_list_type}
-    <...>
-    <...>
+    <...>
+    <...>
+        ${cur_list_type}
+    <...>
+    <...>
     % endif
% for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: diff --git a/views/partials/home/simple.mako b/views/partials/home/simple.mako index e4a784ab76..fa0f418f8a 100644 --- a/views/partials/home/simple.mako +++ b/views/partials/home/simple.mako @@ -16,9 +16,7 @@ <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> % if len(show_lists) > 1: - % if len(show_lists) > 1: -
-        ${cur_list_type}
-    % endif
+        ${cur_list_type}
     % endif
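The hunks above all converge on the same pattern. A minimal sketch of the resulting Mako block follows, with illustrative markup (the tag and class names here are assumptions, not taken from the templates):

    % if len(show_lists) > 1:
        <h1 class="header">${cur_list_type}</h1>
    % endif

The list-type header is rendered exactly once, and only when more than one show list (e.g. Shows and Anime) is present.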
From 7770476d05718251c831b3e4f676aafa6806efdd Mon Sep 17 00:00:00 2001 From: p0ps Date: Fri, 29 Dec 2017 12:49:34 +0100 Subject: [PATCH 11/35] =?UTF-8?q?Decode=20msg=20from=20byte=20to=20unicode?= =?UTF-8?q?,=20to=20prevent=20error=20with=20unicode=20encode=E2=80=A6=20(?= =?UTF-8?q?#3547)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Decode msg from byte to unicode, to prevent error with unicode encoded show titles. * Use text_type in stead of str(). --- medusa/logger/adapters/style.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/logger/adapters/style.py b/medusa/logger/adapters/style.py index 483261e45b..cf280f6a5e 100644 --- a/medusa/logger/adapters/style.py +++ b/medusa/logger/adapters/style.py @@ -33,7 +33,7 @@ def __str__(self): args = [] kwargs = self.args[0] - msg = str(self.msg) + msg = text_type(self.msg) try: return msg.format(*args, **kwargs) From ddeb608f6acf633568550075423479c0a8ac1c80 Mon Sep 17 00:00:00 2001 From: supergonkas Date: Fri, 29 Dec 2017 20:47:49 +0000 Subject: [PATCH 12/35] Fix Torrentz2 being to strict on the category --- medusa/providers/torrent/xml/torrentz2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/providers/torrent/xml/torrentz2.py b/medusa/providers/torrent/xml/torrentz2.py index 501b609d0c..9af82caba3 100644 --- a/medusa/providers/torrent/xml/torrentz2.py +++ b/medusa/providers/torrent/xml/torrentz2.py @@ -105,7 +105,7 @@ def parse(self, data, mode): for row in torrent_rows: try: - if row.category and 'tv' not in row.category.get_text(strip=True).lower(): + if row.category and 'video' not in row.category.get_text(strip=True).lower(): continue title_raw = row.title.text From 9f792cc29f383b924a6f8be11475943aba9eaea4 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Fri, 29 Dec 2017 16:32:33 -0500 Subject: [PATCH 13/35] Classless ImageCache (#3511) * Remove all images for a show, using the ImageClass(). * Clean up remove images * Clean up remove images * Make image cache classless * Make artwork paths more dry, deprecate old methods * Make has_ methods more dry, deprecate old methods * Remove path dict * Move deprecated paths to bottom of list * Refactor TYPE_NAMES -> IMAGE_TYPES * Make fill_cache more dry * Make _cache_image_from_indexer more dry * Make _cache_image_from_file more dry * Make GenericMedia more pythonic and more dry * Use os.path.isfile for get_artwork since it should be a file not a path. 
* Update logging and make aspect ratio detection more dry * Fix get_media refactor to media * Fix logging handler Fix incorrect log params Fix incorrect detection of existing path Fix striping characters from fanart * Flake8 import order * Flake8 * Fix imports * Fix docstrings * Fix docstring and import * Fix docstrings * Fix incorrect refactor in tests --- medusa/image_cache.py | 705 +++++++++--------- medusa/media/banner.py | 27 +- medusa/media/fan_art.py | 14 +- medusa/media/generic.py | 84 +-- medusa/media/network_logo.py | 21 +- medusa/media/poster.py | 27 +- medusa/server/api/v1/core.py | 27 +- medusa/server/api/v2/series_asset.py | 2 +- medusa/server/web/manage/handler.py | 45 +- medusa/tv/series.py | 17 +- tests/legacy/media/generic_media_tests.py | 10 +- tests/legacy/media/show_banner_tests.py | 4 +- tests/legacy/media/show_fan_art_tests.py | 4 +- tests/legacy/media/show_network_logo_tests.py | 4 +- tests/legacy/media/show_poster_tests.py | 4 +- 15 files changed, 496 insertions(+), 499 deletions(-) diff --git a/medusa/image_cache.py b/medusa/image_cache.py index 0a176e0129..920ee26b94 100644 --- a/medusa/image_cache.py +++ b/medusa/image_cache.py @@ -18,376 +18,385 @@ from __future__ import unicode_literals +import logging import os.path +import warnings + +from medusa import app +from medusa.helper.common import try_int +from medusa.helper.exceptions import ShowDirectoryNotFoundException +from medusa.helpers import copy_file, get_image_size +from medusa.logger.adapters.style import BraceAdapter +from medusa.metadata.generic import GenericMetadata + +log = BraceAdapter(logging.getLogger(__name__)) +log.logger.addHandler(logging.NullHandler()) + +BANNER = 1 +POSTER = 2 +BANNER_THUMB = 3 +POSTER_THUMB = 4 +FANART = 5 + +IMAGE_TYPES = { + BANNER: 'banner', + POSTER: 'poster', + BANNER_THUMB: 'banner_thumb', + POSTER_THUMB: 'poster_thumb', + FANART: 'fanart', +} + +# TMDB aspect ratios and image sizes: +# https://www.themoviedb.org/documentation/editing/images?language=en + +# TVDB aspect ratios and image sizes: +# https://www.thetvdb.com/wiki/index.php/Posters +# https://www.thetvdb.com/wiki/index.php/Series_Banners +# https://www.thetvdb.com/wiki/index.php/Fan_Art + + +# min, median, and max aspect ratios by type +ASPECT_RATIOS = { + # most banner aspect ratios are ~5.4 (eg. 758/140) + BANNER: [5, 5.4, 6], + # most poster aspect ratios are ~0.68 (eg. 680/1000) + POSTER: [0.55, 0.68, 0.8], + # most fanart aspect ratios are ~1.777 (eg. 1280/720 and 1920/1080) + FANART: [1.2, 1.777, 2.5], +} + + +def _cache_dir(): + """Build path to the image cache directory.""" + return os.path.abspath(os.path.join(app.CACHE_DIR, 'images')) + + +def _thumbnails_dir(): + """Build path to the thumbnail image cache directory.""" + return os.path.abspath(os.path.join(_cache_dir(), 'thumbnails')) + + +def get_path(img_type, series_id): + """ + Build path to a series cached artwork. + + :param img_type: integer constant representing an image type + :param series_id: the series id + + :return: full path and filename for artwork + """ + image = IMAGE_TYPES[img_type] + thumbnail = image.endswith('_thumb') + if thumbnail: + location = _thumbnails_dir() + image = image[:-len('_thumb')] # strip `_thumb` from the end + else: + location = _cache_dir() + filename = '{series_id}.{image}.jpg'.format( + series_id=series_id, + image=image, + ) + return os.path.join(location, filename) + + +def get_artwork(img_type, series_id): + """ + Get path to cached artwork for a series. 
+ + :param img_type: integer constant representing an image type + :param series_id: the series id + + :return: full path and filename for artwork if it exists + """ + location = get_path(img_type, series_id) + if os.path.isfile(location): + return location + + +def which_type(path): + """ + Analyze image and attempt to determine its type. + + :param path: full path to the image + :return: artwork type if detected, or None + """ + if not os.path.isfile(path): + log.warning('Could not check type, file does not exist: {0}', path) + return + + if not try_int(os.path.getsize(path)): + log.warning('Deleting 0 byte image: {0}', path) + try: + os.remove(path) + except OSError as error: + log.warning( + 'Failed to delete file: {path}. Please delete it manually.' + ' Error: {msg}', {'path': path, 'msg': error}) + return + + image_dimension = get_image_size(path) + if not image_dimension: + log.debug('Skipping image. Unable to get metadata from {0}', path) + return + + height, width = image_dimension + aspect_ratio = width / float(height) + log.debug('Image aspect ratio: {0}', aspect_ratio) + + for img_type in ASPECT_RATIOS: + min_ratio, median_ratio, max_ratio = ASPECT_RATIOS[img_type] + if min_ratio < aspect_ratio < max_ratio: + log.debug('{image} detected based on aspect ratio.', + {'image': IMAGE_TYPES[img_type]}) + return img_type + else: + log.warning('Aspect ratio ({0}) does not match any known types.', + aspect_ratio) + return + + +def replace_images(series): + """ + Replace cached images for a series based on image type. + + :param series: Series object + """ + remove_images(series) + fill_cache(series) + + +def remove_images(series, image_types=None): + """ + Remove cached images for a series based on image type. + + :param series: Series object + :param image_types: iterable of integers for image types to remove + if no image types passed, remove all images + """ + image_types = image_types or IMAGE_TYPES + series_id = series.indexerid + series_name = series.name + + for image_type in image_types: + cur_path = get_path(image_type, series_id) + + # see if image exists + if not os.path.isfile(cur_path): + continue + + # try to remove image + try: + os.remove(cur_path) + except OSError as error: + log.error( + 'Could not remove {img} for series {name} from cache' + ' [{loc}]: {msg}', { + 'img': IMAGE_TYPES[image_type], + 'name': series_name, + 'loc': cur_path, + 'msg': error, + } + ) + else: + log.info('Removed {img} for series {name}', + {'img': IMAGE_TYPES[image_type], 'name': series_name}) + + +def _cache_image_from_file(image_path, img_type, series_id): + """ + Take the image provided and copy it to the cache folder. 
+ + :param image_path: path to the image we're caching + :param img_type: BANNER or POSTER or FANART + :param series_id: id of the show this image belongs to + :return: bool representing success + """ + # generate the path based on the type and the indexer_id + if img_type in (POSTER, BANNER, FANART): + location = get_path(img_type, series_id) + else: + type_name = IMAGE_TYPES.get(img_type, img_type) + log.error('Invalid cache image type: {0}', type_name) + return + + directories = { + 'image': _cache_dir(), + 'thumbnail': _thumbnails_dir(), + } + + for cache in directories: + cache_dir = directories[cache] + if not os.path.isdir(cache_dir): + log.info('Creating {0} cache directory: {1}', cache, cache_dir) + os.makedirs(cache_dir) + + log.info('Copying from {origin} to {dest}', + {'origin': image_path, 'dest': location}) + + copy_file(image_path, location) + + return True + -from . import app, logger -from .helper.exceptions import ShowDirectoryNotFoundException -from .metadata.generic import GenericMetadata +def _cache_image_from_indexer(series, img_type): + """ + Retrieve specified artwork from the indexer and save to the cache folder. + :param series: Series object that we want to cache an image for + :param img_type: BANNER or POSTER or FANART + :return: bool representing success + """ + # generate the path based on the type and the indexer_id + try: + img_type_name = IMAGE_TYPES[img_type] + except KeyError: + log.error('Invalid cache image type: {0}', img_type) + return -class ImageCache(object): + series_id = series.indexerid + location = get_path(img_type, series_id) - BANNER = 1 - POSTER = 2 - BANNER_THUMB = 3 - POSTER_THUMB = 4 - FANART = 5 + # retrieve the image from the indexer using the generic metadata class + # TODO: refactor + metadata_generator = GenericMetadata() + img_data = metadata_generator._retrieve_show_image(img_type_name, series) + result = metadata_generator._write_image(img_data, location) - IMAGE_TYPE_NAMES = { - BANNER: 'banner', - POSTER: 'poster', - BANNER_THUMB: 'banner_thumb', - POSTER_THUMB: 'poster_thumb', - FANART: 'fanart', + return result + + +def fill_cache(series): + """ + Cache artwork for the given show. + + Copy artwork from series directory if possible, or download from indexer. + + :param series: Series object to cache images for + """ + series_id = series.name + # get expected paths for artwork + images = { + img_type: get_path(img_type, series_id) + for img_type in IMAGE_TYPES + } + # check if artwork is cached + needed = { + img_type: location + for img_type, location in images.items() + if not os.path.exists(location) } - def __init__(self): - pass - - def __del__(self): - pass - - @property - def path(self): - return { - self.POSTER: self.poster_path, - self.BANNER: self.banner_path, - self.POSTER_THUMB: self.poster_thumb_path, - self.BANNER_THUMB: self.banner_thumb_path, - self.FANART: self.fanart_path, - } - - @classmethod - def _cache_dir(cls): - """Build up the full path to the image cache directory.""" - return os.path.abspath(os.path.join(app.CACHE_DIR, 'images')) - - def _thumbnails_dir(self): - """Build up the full path to the thumbnails image cache directory.""" - return os.path.abspath(os.path.join(self._cache_dir(), 'thumbnails')) - - @classmethod - def poster_path(cls, indexer_id): - """ - Build up the path to a poster cache for a given Indexer ID. 
- - :param indexer_id: ID of the show to use in the file name - :return: a full path to the cached poster file for the given Indexer ID - """ - poster_file_name = '{0}.poster.jpg'.format(indexer_id) - return os.path.join(cls._cache_dir(), poster_file_name) - - @classmethod - def banner_path(cls, indexer_id): - """ - Build up the path to a banner cache for a given Indexer ID. - - :param indexer_id: ID of the show to use in the file name - :return: a full path to the cached banner file for the given Indexer ID - """ - banner_file_name = '{0}.banner.jpg'.format(indexer_id) - return os.path.join(cls._cache_dir(), banner_file_name) - - def fanart_path(self, indexer_id): - """ - Build up the path to a fanart cache for a given Indexer ID. - - :param indexer_id: ID of the show to use in the file name - :return: a full path to the cached fanart file for the given Indexer ID - """ - fanart_file_name = '{0}.fanart.jpg'.format(indexer_id) - return os.path.join(self._cache_dir(), fanart_file_name) - - def poster_thumb_path(self, indexer_id): - """ - Build up the path to a poster thumb cache for a given Indexer ID. - - :param indexer_id: ID of the show to use in the file name - :return: a full path to the cached poster thumb file for the given Indexer ID - """ - posterthumb_file_name = '{0}.poster.jpg'.format(indexer_id) - return os.path.join(self._thumbnails_dir(), posterthumb_file_name) - - def banner_thumb_path(self, indexer_id): - """ - Build up the path to a banner thumb cache for a given Indexer ID. - - :param indexer_id: ID of the show to use in the file name - :return: a full path to the cached banner thumb file for the given Indexer ID - """ - bannerthumb_file_name = '{0}.banner.jpg'.format(indexer_id) - return os.path.join(self._thumbnails_dir(), bannerthumb_file_name) - - def has_poster(self, indexer_id): - """Return true if a cached poster exists for the given Indexer ID.""" - poster_path = self.poster_path(indexer_id) - bool_result = os.path.isfile(poster_path) - logger.log('Checking if file {0} exists: {1}'.format(poster_path, bool_result), logger.DEBUG) - return bool_result - - def has_banner(self, indexer_id): - """Return true if a cached banner exists for the given Indexer ID.""" - banner_path = self.banner_path(indexer_id) - bool_result = os.path.isfile(banner_path) - logger.log('Checking if file {0} exists: {1}'.format(banner_path, bool_result), logger.DEBUG) - return bool_result - - def has_fanart(self, indexer_id): - """Return true if a cached fanart exists for the given Indexer ID.""" - fanart_path = self.fanart_path(indexer_id) - bool_result = os.path.isfile(fanart_path) - logger.log('Checking if file {0} exists: {1}'.format(fanart_path, bool_result), logger.DEBUG) - return bool_result - - def has_poster_thumbnail(self, indexer_id): - """Return true if a cached poster thumbnail exists for the given Indexer ID.""" - poster_thumb_path = self.poster_thumb_path(indexer_id) - bool_result = os.path.isfile(poster_thumb_path) - logger.log('Checking if file {0} exists: {1}'.format(poster_thumb_path, bool_result), logger.DEBUG) - return bool_result - - def has_banner_thumbnail(self, indexer_id): - """Return true if a cached banner exists for the given Indexer ID.""" - banner_thumb_path = self.banner_thumb_path(indexer_id) - bool_result = os.path.isfile(banner_thumb_path) - logger.log('Checking if file {0} exists: {1}'.format(banner_thumb_path, bool_result), logger.DEBUG) - return bool_result - - def which_type(self, image_path): - """ - Analyze the image provided and attempt to determine 
whether it is a poster or a banner. - - :param image_path: full path to the image - :return: BANNER, POSTER if it concluded one or the other, or None if the image was neither (or didn't exist) - """ - from .helpers import get_image_size - from .helper.common import try_int - - if not os.path.isfile(image_path): - logger.log("Couldn't check the type of {image_path} because it doesn't exist".format - (image_path=image_path), logger.WARNING) - return + if not needed: + log.debug('No new cache images needed') + log.info('Cache check completed') + return - if try_int(os.path.getsize(image_path)) == 0: - logger.log('Image has 0 bytes size. Deleting it: {image_path}'.format - (image_path=image_path), logger.WARNING) - try: - os.remove(image_path) - except OSError as e: - logger.log("Couldn't delete file: {image_path}. Please manually delete it. Error: {error_msg}".format - (image_path=image_path, error_msg=e), logger.WARNING) - return + log.debug('Searching for images for series id {0}', series_id) - image_dimension = get_image_size(image_path) - if not image_dimension: - logger.log('Unable to get metadata from {image_path}, not using your existing image'.format - (image_path=image_path), logger.DEBUG) - return + # check the show for poster, banner or fanart + for img_type in BANNER, POSTER, FANART: + if not needed.get(img_type): + continue + try: + for provider in app.metadata_provider_dict.values(): + log.debug('Checking {provider} metadata for {img}', + {'provider': provider, 'img': IMAGE_TYPES[img_type]}) - height, width = image_dimension - img_ratio = float(width) / float(height) + if os.path.isfile(provider.get_poster_path(series)): + path = provider.get_poster_path(series) + filename = os.path.abspath(path) + file_type = which_type(filename) - # most posters are around 0.68 width/height ratio (eg. 680/1000) - if 0.55 < img_ratio < 0.8: - return self.POSTER + if not file_type: + log.warning('Unable to determine image type for {0}', + filename) + continue - # most banners are around 5.4 width/height ratio (eg. 758/140) - elif 5 < img_ratio < 6: - return self.BANNER + desired = needed.get(file_type) + type_name = IMAGE_TYPES[file_type] + log.debug( + 'Wanted {img} {path}: {status}', { + 'img': type_name, + 'path': filename, + 'status': bool(desired) + } + ) - # most fanarts are around 1.77777 width/height ratio (eg. 1280/720 and 1920/1080) - elif 1.7 < img_ratio < 1.8: - return self.FANART - else: - logger.log('Image has size ratio of {img_ratio}, unknown type'.format(img_ratio=img_ratio), logger.WARNING) - return + if desired: + # cache the image + _cache_image_from_file(filename, file_type, series_id) + log.debug('Cached {img} from series folder: {path}', + {'img': type_name, 'path': filename}) + # remove it from the needed image types + needed.pop(file_type) - def replace_images(self, series): - """ - Replace cached images for a series based on image type. - - :param series: Series object - """ - self.remove_images(series) - self.fill_cache(series) - - def remove_images(self, series, image_types=None): - """ - Remove cached images for a series based on image type. 
- - :param series: Series object - :param image_types: iterable of integers for image types to remove - if no image types passed, remove all images - """ - image_types = image_types or self.IMAGE_TYPE_NAMES - series_id = series.indexerid - series_name = series.name - - for image_type in image_types: - cur_path = self.path[image_type](series_id) - - # see if image exists - try: - if not os.path.isfile(cur_path): - continue - except OSError: - continue - - # try to remove image - try: - os.remove(cur_path) - except OSError as error: - logger.log( - 'Could not remove {img} from cache [{loc}]: {msg}'.format( - img=self.IMAGE_TYPE_NAMES[image_type], - loc=cur_path, - msg=error, - ), - logger.WARNING, - ) - else: - logger.log( - 'Removed {img} for series {name}'.format( - img=self.IMAGE_TYPE_NAMES[image_type], - name=series_name - ), - logger.INFO, - ) - - def _cache_image_from_file(self, image_path, img_type, indexer_id): - """ - Take the image provided and copy it to the cache folder. - - :param image_path: path to the image we're caching - :param img_type: BANNER or POSTER or FANART - :param indexer_id: id of the show this image belongs to - :return: bool representing success - """ - from . import helpers - # generate the path based on the type and the indexer_id - if img_type == self.POSTER: - dest_path = self.poster_path(indexer_id) - elif img_type == self.BANNER: - dest_path = self.banner_path(indexer_id) - elif img_type == self.FANART: - dest_path = self.fanart_path(indexer_id) - else: - logger.log('Invalid cache image type: {0}'.format(img_type), logger.ERROR) - return False - - # make sure the cache folder exists before we try copying to it - if not os.path.isdir(self._cache_dir()): - logger.log("Image cache dir doesn't exist, creating it at: {0}".format(self._cache_dir())) - os.makedirs(self._cache_dir()) - - if not os.path.isdir(self._thumbnails_dir()): - logger.log("Thumbnails cache dir didn't exist, creating it at: {0}".format(self._thumbnails_dir())) - os.makedirs(self._thumbnails_dir()) - - logger.log('Copying from {origin} to {dest}'.format(origin=image_path, dest=dest_path)) - helpers.copy_file(image_path, dest_path) - - return True - - def _cache_image_from_indexer(self, show_obj, img_type): - """ - Retrieve an image of the type specified from the indexer and save it to the cache folder. 
- - :param show_obj: Series object that we want to cache an image for - :param img_type: BANNER or POSTER or FANART - :return: bool representing success - """ - # generate the path based on the type and the indexer_id - if img_type == self.POSTER: - img_type_name = 'poster' - dest_path = self.poster_path(show_obj.indexerid) - elif img_type == self.BANNER: - img_type_name = 'banner' - dest_path = self.banner_path(show_obj.indexerid) - elif img_type == self.POSTER_THUMB: - img_type_name = 'poster_thumb' - dest_path = self.poster_thumb_path(show_obj.indexerid) - elif img_type == self.BANNER_THUMB: - img_type_name = 'banner_thumb' - dest_path = self.banner_thumb_path(show_obj.indexerid) - elif img_type == self.FANART: - img_type_name = 'fanart' - dest_path = self.fanart_path(show_obj.indexerid) - else: - logger.log('Invalid cache image type: {0}'.format(img_type), logger.ERROR) - return False - - # retrieve the image from the indexer using the generic metadata class - # TODO: refactor - metadata_generator = GenericMetadata() - img_data = metadata_generator._retrieve_show_image(img_type_name, show_obj) - result = metadata_generator._write_image(img_data, dest_path) - - return result - - def fill_cache(self, show_obj): - """ - Cache all images for the given show. - - Copies them from the show dir if possible, or downloads them from indexer if they aren't in the show dir. - - :param show_obj: Series object to cache images for - """ - logger.log('Checking if we need any cache images for show: {0}'.format(show_obj.name), logger.DEBUG) - - # check if the images are already cached or not - need_images = {self.POSTER: not self.has_poster(show_obj.indexerid), - self.BANNER: not self.has_banner(show_obj.indexerid), - self.POSTER_THUMB: not self.has_poster_thumbnail(show_obj.indexerid), - self.BANNER_THUMB: not self.has_banner_thumbnail(show_obj.indexerid), - self.FANART: not self.has_fanart(show_obj.indexerid)} - - should_continue = None - for key in need_images: - if need_images.get(key): - should_continue = True - break - - if not should_continue: - logger.log('No new cache images needed, not retrieving new ones', logger.DEBUG) - logger.log('Cache check done') - return + except ShowDirectoryNotFoundException: + log.warning('Path does not exist. Unable to search it for images.') + + # download missing images from indexer + for img_type in needed: + log.debug('Searching for {img} for series_id {x}', + {'img': IMAGE_TYPES[img_type], 'x': series_id}) + _cache_image_from_indexer(series, img_type) + + log.info('Cache check completed') + + +def banner_path(indexer_id): + """DEPRECATED: Build path to a series cached artwork. Use `get_path`.""" + warnings.warn('Deprecated use get_path instead', DeprecationWarning) + return get_path(BANNER, indexer_id) + + +def banner_thumb_path(indexer_id): + """DEPRECATED: Build path to a series cached artwork. Use `get_path`.""" + warnings.warn('Deprecated use get_path instead', DeprecationWarning) + return get_path(BANNER_THUMB, indexer_id) + + +def fanart_path(indexer_id): + """DEPRECATED: Build path to a series cached artwork. Use `get_path`.""" + warnings.warn('Deprecated use get_path instead', DeprecationWarning) + return get_path(FANART, indexer_id) + + +def poster_path(indexer_id): + """DEPRECATED: Build path to a series cached artwork. Use `get_path`.""" + warnings.warn('Deprecated use get_path instead', DeprecationWarning) + return get_path(POSTER, indexer_id) + + +def poster_thumb_path(indexer_id): + """DEPRECATED: Build path to a series cached artwork. 
Use `get_path`.""" + warnings.warn('Deprecated use get_path instead', DeprecationWarning) + return get_path(POSTER_THUMB, indexer_id) - # check the show dir for poster, banner or fanart images and use them - if any([need_images[self.POSTER], need_images[self.BANNER], need_images[self.FANART]]): - try: - for cur_provider in app.metadata_provider_dict.values(): - logger.log('Checking if we can use the show image from the {provider} metadata'.format - (provider=cur_provider.name), logger.DEBUG) - if os.path.isfile(cur_provider.get_poster_path(show_obj)): - cur_file_name = os.path.abspath(cur_provider.get_poster_path(show_obj)) - cur_file_type = self.which_type(cur_file_name) +def has_poster(indexer_id): + """DEPRECATED: Check if artwork exists for series. Use `get_artwork`.""" + warnings.warn('Deprecated use get_artwork instead', DeprecationWarning) + return get_artwork(POSTER, indexer_id) - if cur_file_type is None: - logger.log('Unable to retrieve image type, not using the image from: {0}'.format - (cur_file_name), logger.WARNING) - continue - logger.log('Checking if image {0} (type {1}) needs metadata: {2}'.format - (cur_file_name, cur_file_type, need_images[cur_file_type]), logger.DEBUG) +def has_banner(indexer_id): + """DEPRECATED: Check if artwork exists for series. Use `get_artwork`.""" + warnings.warn('Deprecated use get_artwork instead', DeprecationWarning) + return get_artwork(BANNER, indexer_id) - if cur_file_type in need_images and need_images[cur_file_type]: - logger.log("Found an image in the show dir that doesn't exist in the cache, " - "caching it: {0} (type {1})".format(cur_file_name, cur_file_type), logger.DEBUG) - self._cache_image_from_file(cur_file_name, cur_file_type, show_obj.indexerid) - need_images[cur_file_type] = False +def has_fanart(indexer_id): + """DEPRECATED: Check if artwork exists for series. Use `get_artwork`.""" + warnings.warn('Deprecated use get_artwork instead', DeprecationWarning) + return get_artwork(FANART, indexer_id) - except ShowDirectoryNotFoundException: - logger.log("Unable to search for images in the show dir because it doesn't exist", logger.WARNING) - # download missing images from indexer - for cur_image_type in need_images: - logger.log('Seeing if we still need an image of type {0}: {1}'.format - (cur_image_type, need_images[cur_image_type]), logger.DEBUG) +def has_poster_thumbnail(indexer_id): + """DEPRECATED: Check if artwork exists for series. Use `get_artwork`.""" + warnings.warn('Deprecated use get_artwork instead', DeprecationWarning) + return get_artwork(POSTER_THUMB, indexer_id) - if cur_image_type in need_images and need_images[cur_image_type]: - self._cache_image_from_indexer(show_obj, cur_image_type) - logger.log('Cache check done') +def has_banner_thumbnail(indexer_id): + """DEPRECATED: Check if artwork exists for series. Use `get_artwork`.""" + warnings.warn('Deprecated use get_artwork instead', DeprecationWarning) + return get_artwork(BANNER_THUMB, indexer_id) diff --git a/medusa/media/banner.py b/medusa/media/banner.py index 16b8122cb4..a8ea7a9fbc 100644 --- a/medusa/media/banner.py +++ b/medusa/media/banner.py @@ -16,22 +16,21 @@ # You should have received a copy of the GNU General Public License # along with Medusa. If not, see . 
-from .generic import GenericMedia -from ..image_cache import ImageCache +from medusa import image_cache +from medusa.media.generic import GenericMedia class ShowBanner(GenericMedia): """Get the banner of a show.""" - def get_default_media_name(self): - return 'banner.png' - - def get_media_path(self): - if self.get_show(): - if self.media_format == 'normal': - return ImageCache().banner_path(self.indexer_id) - - if self.media_format == 'thumb': - return ImageCache().banner_thumb_path(self.indexer_id) - - return '' + default_media_name = 'banner.png' + + @property + def img_type(self): + """Get the image type (normal or thumbnail).""" + if self.media_format == 'normal': + return image_cache.BANNER + elif self.media_format == 'thumb': + return image_cache.BANNER_THUMB + else: + raise ValueError('Invalid media format') diff --git a/medusa/media/fan_art.py b/medusa/media/fan_art.py index 4e3bd2cd25..01da3a3b42 100644 --- a/medusa/media/fan_art.py +++ b/medusa/media/fan_art.py @@ -16,18 +16,12 @@ # You should have received a copy of the GNU General Public License # along with Medusa. If not, see . -from .generic import GenericMedia -from ..image_cache import ImageCache +from medusa import image_cache +from medusa.media.generic import GenericMedia class ShowFanArt(GenericMedia): """Get the fan art of a show.""" - def get_default_media_name(self): - return 'fanart.png' - - def get_media_path(self): - if self.get_show(): - return ImageCache().fanart_path(self.indexer_id) - - return '' + img_type = image_cache.FANART + default_media_name = 'fanart.png' diff --git a/medusa/media/generic.py b/medusa/media/generic.py index 4ed828a89a..a1d6be8f59 100644 --- a/medusa/media/generic.py +++ b/medusa/media/generic.py @@ -16,19 +16,25 @@ # You should have received a copy of the GNU General Public License # along with Medusa. If not, see . -from abc import abstractmethod from mimetypes import guess_type from os.path import isfile, join, normpath -from .. import app -from ..helper.common import try_int -from ..helper.exceptions import MultipleShowObjectsException -from ..show.show import Show +from medusa import app, image_cache +from medusa.helper.common import try_int +from medusa.helper.exceptions import MultipleShowObjectsException +from medusa.show.show import Show class GenericMedia(object): + """Base class for series media.""" + + img_type = None + default_media_name = '' + def __init__(self, indexer_id, media_format='normal'): """ + Initialize media for a series. + :param indexer_id: The indexer id of the show :param media_format: The format of the media to get. 
Must be either 'normal' or 'thumb' """ @@ -40,20 +46,11 @@ def __init__(self, indexer_id, media_format='normal'): else: self.media_format = 'normal' - @abstractmethod - def get_default_media_name(self): - """ - :return: The name of the file to use as a fallback if the show media file is missing - """ - - return '' - - def get_media(self): - """ - :return: The content of the desired media file - """ + @property + def media(self): + """Get the contents of the desired media file.""" - static_media_path = self.get_static_media_path() + static_media_path = self.static_media_path if isfile(static_media_path): with open(static_media_path, 'rb') as content: @@ -61,55 +58,46 @@ def get_media(self): return None - @abstractmethod - def get_media_path(self): - """ - :return: The path to the media related to ``self.indexer_id`` - """ - - return '' + @property + def media_path(self): + """Get the relative path to the media.""" + if self.series: + return image_cache.get_path(self.img_type, self.indexer_id) + else: + return '' @staticmethod def get_media_root(): - """ - :return: The root folder containing the media - """ - + """Get the root folder containing the media.""" return join(app.PROG_DIR, 'static') - def get_media_type(self): - """ - :return: The mime type of the current media - """ - - static_media_path = self.get_static_media_path() + @property + def media_type(self): + """Get the mime type of the current media.""" + static_media_path = self.static_media_path if isfile(static_media_path): return guess_type(static_media_path)[0] return '' - def get_show(self): - """ - :return: The show object associated with ``self.indexer_id`` or ``None`` - """ - + @property + def series(self): + """Find the series by indexer id.""" try: return Show.find(app.showList, self.indexer_id) except MultipleShowObjectsException: return None - def get_static_media_path(self): - """ - :return: The full path to the media - """ - - if self.get_show(): - media_path = self.get_media_path() + @property + def static_media_path(self): + """Get the full path to the media.""" + if self.series: + media_path = self.media_path if isfile(media_path): return normpath(media_path) - image_path = join(self.get_media_root(), 'images', self.get_default_media_name()) + image_path = join(self.get_media_root(), 'images', self.default_media_name) return image_path.replace('\\', '/') diff --git a/medusa/media/network_logo.py b/medusa/media/network_logo.py index bdc7cc8cfa..6578cd6d20 100644 --- a/medusa/media/network_logo.py +++ b/medusa/media/network_logo.py @@ -18,19 +18,22 @@ from os.path import join -from .generic import GenericMedia +from medusa.media.generic import GenericMedia class ShowNetworkLogo(GenericMedia): """Get the network logo of a show.""" - def get_default_media_name(self): + @property + def default_media_name(self): + """Get default icon for Network missing a logo.""" return join('network', 'nonetwork.png') - def get_media_path(self): - show = self.get_show() - - if show: - return join(self.get_media_root(), 'images', 'network', show.network_logo_name + '.png') - - return '' + @property + def media_path(self): + """Get the relative path to the media.""" + series = self.series + if series: + return join(self.get_media_root(), 'images', 'network', series.network_logo_name + '.png') + else: + return '' diff --git a/medusa/media/poster.py b/medusa/media/poster.py index 8a62dcfa8a..3b2e780072 100644 --- a/medusa/media/poster.py +++ b/medusa/media/poster.py @@ -16,22 +16,21 @@ # You should have received a copy of the GNU General 
Public License # along with Medusa. If not, see . -from .generic import GenericMedia -from ..image_cache import ImageCache +from medusa import image_cache +from medusa.media.generic import GenericMedia class ShowPoster(GenericMedia): """Get the poster of a show.""" - def get_default_media_name(self): - return 'poster.png' - - def get_media_path(self): - if self.get_show(): - if self.media_format == 'normal': - return ImageCache().poster_path(self.indexer_id) - - if self.media_format == 'thumb': - return ImageCache().poster_thumb_path(self.indexer_id) - - return '' + default_media_name = 'poster.png' + + @property + def img_type(self): + """Get the image type (normal or thumbnail).""" + if self.media_format == 'normal': + return image_cache.POSTER + elif self.media_format == 'thumb': + return image_cache.POSTER_THUMB + else: + raise ValueError('Invalid media format') diff --git a/medusa/server/api/v1/core.py b/medusa/server/api/v1/core.py index e99bbac444..c84621def9 100644 --- a/medusa/server/api/v1/core.py +++ b/medusa/server/api/v1/core.py @@ -145,8 +145,8 @@ def get(self, *args, **kwargs): pass def _out_as_image(self, _dict): - self.set_header('Content-Type', _dict['image'].get_media_type()) - return _dict['image'].get_media() + self.set_header('Content-Type', _dict['image'].media_type) + return _dict['image'].media def _out_as_json(self, _dict): self.set_header('Content-Type', 'application/json;charset=UTF-8') @@ -2180,25 +2180,24 @@ def __init__(self, args, kwargs): ApiCall.__init__(self, args, kwargs) def run(self): - """ Check Medusa's cache to see if the images (poster, banner, fanart) for a show are valid """ - show_obj = Show.find(app.showList, int(self.indexerid)) - if not show_obj: + """Check cache to see if the images for a show are valid.""" + # TODO: Add support for additional types + series_obj = Show.find(app.showList, int(self.indexerid)) + if not series_obj: return _responds(RESULT_FAILURE, msg='Show not found') # TODO: catch if cache dir is missing/invalid.. so it doesn't break show/show.cache # return {"poster": 0, "banner": 0} - cache_obj = image_cache.ImageCache() - - has_poster = 0 - has_banner = 0 + series = series_obj.indexerid + image_types = image_cache.IMAGE_TYPES - if os.path.isfile(cache_obj.poster_path(show_obj.indexerid)): - has_poster = 1 - if os.path.isfile(cache_obj.banner_path(show_obj.indexerid)): - has_banner = 1 + results = { + image_types[img]: 1 if image_cache.get_artwork(img, series) else 0 + for img in image_types + } - return _responds(RESULT_SUCCESS, {'poster': has_poster, 'banner': has_banner}) + return _responds(RESULT_SUCCESS, results) class CMD_ShowDelete(ApiCall): diff --git a/medusa/server/api/v2/series_asset.py b/medusa/server/api/v2/series_asset.py index e397bc2642..bbec0676c6 100644 --- a/medusa/server/api/v2/series_asset.py +++ b/medusa/server/api/v2/series_asset.py @@ -33,4 +33,4 @@ def get(self, series_slug, identifier, *args, **kwargs): if not asset: return self._not_found('Asset not found') - self._ok(stream=asset.get_media(), content_type=asset.get_media_type()) + self._ok(stream=asset.media, content_type=asset.media_type) diff --git a/medusa/server/web/manage/handler.py b/medusa/server/web/manage/handler.py index 943be458c3..df7c2f39fa 100644 --- a/medusa/server/web/manage/handler.py +++ b/medusa/server/web/manage/handler.py @@ -7,27 +7,39 @@ import os import re -from tornroutes import route - -from ..core import PageTemplate, WebRoot -from ..home import Home -from .... 
import app, db, helpers, logger, network_timezones, sbdatetime, subtitles, ui -from ....common import ( - Overview, Quality, SNATCHED, +from medusa import ( + app, + db, + helpers, + image_cache, + logger, + network_timezones, + sbdatetime, + subtitles, + ui, +) +from medusa.common import ( + Overview, + Quality, + SNATCHED, ) -from ....helper.common import ( - episode_num, try_int, +from medusa.helper.common import ( + episode_num, + try_int, ) -from ....helper.exceptions import ( +from medusa.helper.exceptions import ( CantRefreshShowException, CantUpdateShowException, ) -from ....helpers import is_media_file -from ....image_cache import ImageCache -from ....network_timezones import app_timezone -from ....post_processor import PostProcessor -from ....show.show import Show -from ....tv import Episode +from medusa.helpers import is_media_file +from medusa.network_timezones import app_timezone +from medusa.post_processor import PostProcessor +from medusa.server.web.core import PageTemplate, WebRoot +from medusa.server.web.home import Home +from medusa.show.show import Show +from medusa.tv import Episode + +from tornroutes import route @route('/manage(/?.*)') @@ -716,7 +728,6 @@ def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None subtitles.append(show_obj.name) if cur_show_id in to_image_update: - image_cache = ImageCache() image_cache.replace_images(show_obj) if errors: diff --git a/medusa/tv/series.py b/medusa/tv/series.py index 3ee715b38c..8f60956b17 100644 --- a/medusa/tv/series.py +++ b/medusa/tv/series.py @@ -65,7 +65,6 @@ ) from medusa.helper.mappings import NonEmptyDict from medusa.helpers.externals import get_externals, load_externals_from_db -from medusa.image_cache import ImageCache from medusa.indexers.indexer_api import indexerApi from medusa.indexers.indexer_config import ( INDEXER_TVRAGE, @@ -521,16 +520,14 @@ def airs(self, value): @property def poster(self): """Return poster path.""" - poster = ImageCache.poster_path(self.indexerid) - if os.path.isfile(poster): - return poster + img_type = image_cache.POSTER + return image_cache.get_artwork(img_type, self.indexerid) @property def banner(self): """Return banner path.""" - banner = ImageCache.banner_path(self.indexerid) - if os.path.isfile(banner): - return banner + img_type = image_cache.POSTER + return image_cache.get_artwork(img_type, self.indexerid) @property def aliases(self): @@ -1726,11 +1723,9 @@ def delete_show(self, full=False): def populate_cache(self): """Populate image caching.""" - cache_inst = image_cache.ImageCache() - log.debug(u'{id}: Checking & filling cache for show {show}', {'id': self.indexerid, 'show': self.name}) - cache_inst.fill_cache(self) + image_cache.fill_cache(self) def refresh_dir(self): """Refresh show using its location. 
@@ -2318,7 +2313,7 @@ def delete(self, remove_files):
 
     def remove_images(self):
         """Remove images from cache."""
-        image_cache.ImageCache().remove_images(self)
+        image_cache.remove_images(self)
 
     def get_asset(self, asset_type):
         """Get the specified asset for this series."""
diff --git a/tests/legacy/media/generic_media_tests.py b/tests/legacy/media/generic_media_tests.py
index cfeca95dbd..1613fd1416 100644
--- a/tests/legacy/media/generic_media_tests.py
+++ b/tests/legacy/media/generic_media_tests.py
@@ -100,13 +100,13 @@ def test___init__(self):
         self.assertEqual(generic_media.indexer_id, expected_indexer_id)
         self.assertEqual(generic_media.media_format, expected_media_format)
 
-    def test_get_default_media_name(self):
-        self.assertEqual(GenericMedia(0, '').get_default_media_name(), '')
+    def test_default_media_name(self):
+        self.assertEqual(GenericMedia(0, '').default_media_name, '')
 
-    def test_get_media_path(self):
-        self.assertEqual(GenericMedia(0, '').get_media_path(), '')
+    def test_media_path(self):
+        self.assertEqual(GenericMedia(0, '').media_path, '')
 
-    def test_get_media_root(self):
+    def test_media_root(self):
         app.PROG_DIR = os.path.join('some', 'path', 'to', 'application')
         self.assertEqual(GenericMedia.get_media_root(), os.path.join('some', 'path', 'to', 'application', 'static'))
diff --git a/tests/legacy/media/show_banner_tests.py b/tests/legacy/media/show_banner_tests.py
index 36fb1f5934..1cb98211b3 100644
--- a/tests/legacy/media/show_banner_tests.py
+++ b/tests/legacy/media/show_banner_tests.py
@@ -25,5 +25,5 @@ class ShowBannerTests(GenericMediaTests):
     """Test ShowBanner."""
 
-    def test_get_default_media_name(self):
-        self.assertEqual(ShowBanner(0, '').get_default_media_name(), 'banner.png')
+    def test_default_media_name(self):
+        self.assertEqual(ShowBanner(0, '').default_media_name, 'banner.png')
diff --git a/tests/legacy/media/show_fan_art_tests.py b/tests/legacy/media/show_fan_art_tests.py
index 217fdab904..3d52ba3a29 100644
--- a/tests/legacy/media/show_fan_art_tests.py
+++ b/tests/legacy/media/show_fan_art_tests.py
@@ -25,5 +25,5 @@ class ShowFanArtTests(GenericMediaTests):
     """Test ShowFanArt."""
 
-    def test_get_default_media_name(self):
-        self.assertEqual(ShowFanArt(0, '').get_default_media_name(), 'fanart.png')
+    def test_default_media_name(self):
+        self.assertEqual(ShowFanArt(0, '').default_media_name, 'fanart.png')
diff --git a/tests/legacy/media/show_network_logo_tests.py b/tests/legacy/media/show_network_logo_tests.py
index f4f225e627..404a1f1899 100644
--- a/tests/legacy/media/show_network_logo_tests.py
+++ b/tests/legacy/media/show_network_logo_tests.py
@@ -27,5 +27,5 @@ class ShowNetworkLogoTests(GenericMediaTests):
     """Test ShowNetworkLogo."""
 
-    def test_get_default_media_name(self):
-        self.assertEqual(ShowNetworkLogo(0, '').get_default_media_name(), os.path.join('network', 'nonetwork.png'))
+    def test_default_media_name(self):
+        self.assertEqual(ShowNetworkLogo(0, '').default_media_name, os.path.join('network', 'nonetwork.png'))
diff --git a/tests/legacy/media/show_poster_tests.py b/tests/legacy/media/show_poster_tests.py
index c9a24eb6fb..29eb9b798b 100644
--- a/tests/legacy/media/show_poster_tests.py
+++ b/tests/legacy/media/show_poster_tests.py
@@ -25,5 +25,5 @@ class ShowPosterTests(GenericMediaTests):
     """Test ShowPoster."""
 
-    def test_get_default_media_name(self):
-        self.assertEqual(ShowPoster(0, '').get_default_media_name(), 'poster.png')
+    def test_default_media_name(self):
+        self.assertEqual(ShowPoster(0, '').default_media_name, 'poster.png')
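Taken together, the hunks above replace the stateful ImageCache() instance methods with module-level helpers in medusa.image_cache, and turn the media getters into plain properties. A minimal sketch of the new call pattern, assuming (as the CMD_ShowCache rewrite implies) that get_artwork() returns the cached file path or a falsy value when nothing is cached; the module path and series id below are illustrative only:

    from medusa import image_cache
    from medusa.media.show_poster import ShowPoster  # assumed module path

    poster = ShowPoster(1234, 'thumb')   # (indexer_id, media_format), as in the tests
    img_type = poster.img_type           # image_cache.POSTER_THUMB for the 'thumb' format
    artwork = image_cache.get_artwork(img_type, 1234)
    has_thumb = 1 if artwork else 0      # mirrors the new CMD_ShowCache result dict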
From 69032881335a08f359e5e08f5e92419aa187e97d Mon Sep 17 00:00:00 2001
From: Fernando
Date: Fri, 29 Dec 2017 20:01:38 -0200
Subject: [PATCH 14/35] Ignore folders starting with # (DSM) (#3552)

---
 medusa/process_tv.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/medusa/process_tv.py b/medusa/process_tv.py
index 805d3e65c7..1fc09b0416 100644
--- a/medusa/process_tv.py
+++ b/medusa/process_tv.py
@@ -27,7 +27,7 @@
 
 class ProcessResult(object):
 
-    IGNORED_FOLDERS = ('@eaDir',)
+    IGNORED_FOLDERS = ('@eaDir', '#recycle',)
 
     def __init__(self, path, process_method=None):

From 1044a890f5b6c21fe77a84a51b4f4babcbb3b0a7 Mon Sep 17 00:00:00 2001
From: supergonkas
Date: Sat, 30 Dec 2017 19:24:59 +0000
Subject: [PATCH 15/35] Remove hounddawgs provider

---
 medusa/providers/torrent/html/hounddawgs.py | 222 --------------------
 1 file changed, 222 deletions(-)
 delete mode 100644 medusa/providers/torrent/html/hounddawgs.py

diff --git a/medusa/providers/torrent/html/hounddawgs.py b/medusa/providers/torrent/html/hounddawgs.py
deleted file mode 100644
index 19962f8ff9..0000000000
--- a/medusa/providers/torrent/html/hounddawgs.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# coding=utf-8
-
-"""Provider code for HoundDawgs."""
-
-from __future__ import unicode_literals
-
-import logging
-import re
-import traceback
-
-from medusa import tv
-from medusa.bs4_parser import BS4Parser
-from medusa.helper.common import (
-    convert_size,
-    try_int,
-)
-from medusa.logger.adapters.style import BraceAdapter
-from medusa.providers.torrent.torrent_provider import TorrentProvider
-
-from requests.compat import urljoin
-from requests.utils import dict_from_cookiejar
-
-log = BraceAdapter(logging.getLogger(__name__))
-log.logger.addHandler(logging.NullHandler())
-
-
-class HoundDawgsProvider(TorrentProvider):
-    """HoundDawgs Torrent provider."""
-
-    def __init__(self):
-        """Initialize the class."""
-        super(HoundDawgsProvider, self).__init__('HoundDawgs')
-
-        # Credentials
-        self.username = None
-        self.password = None
-
-        # URLs
-        self.url = 'https://hounddawgs.org'
-        self.urls = {
-            'base_url': self.url,
-            'search': urljoin(self.url, 'torrents.php'),
-            'login': urljoin(self.url, 'login.php'),
-        }
-
-        # Proper Strings
-
-        # Miscellaneous Options
-        self.freeleech = None
-        self.ranked = None
-
-        # Torrent Stats
-        self.minseed = None
-        self.minleech = None
-
-        # Cache
-        self.cache = tv.Cache(self)
-
-    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
-        """
-        Search a provider and parse the results.
- - :param search_strings: A dict with mode (key) and the search value (value) - :param age: Not used - :param ep_obj: Not used - :returns: A list of search results (structure) - """ - results = [] - if not self.login(): - return results - - # Search Params - search_params = { - 'filter_cat[85]': 1, - 'filter_cat[58]': 1, - 'filter_cat[57]': 1, - 'filter_cat[74]': 1, - 'filter_cat[92]': 1, - 'filter_cat[93]': 1, - 'order_by': 's3', - 'order_way': 'desc', - 'type': '', - 'userid': '', - 'searchstr': '', - 'searchimdb': '', - 'searchtags': '' - } - - for mode in search_strings: - log.debug('Search mode: {0}', mode) - - for search_string in search_strings[mode]: - - if mode != 'RSS': - log.debug('Search string: {search}', - {'search': search_string}) - if self.ranked: - log.debug('Searching only ranked torrents') - - search_params['searchstr'] = search_string - response = self.session.get(self.urls['search'], params=search_params) - if not response or not response.text: - log.debug('No data returned from provider') - continue - if not response.text: - continue - - results += self.parse(response.text, mode) - - return results - - def parse(self, data, mode): - """ - Parse search results for items. - - :param data: The raw response from a search - :param mode: The current mode used to search, e.g. RSS - - :return: A list of items found - """ - items = [] - - with BS4Parser(data, 'html5lib') as html: - torrent_table = html.find('table', {'id': 'torrent_table'}) - - # Continue only if at least one release is found - if not torrent_table: - log.debug('Data returned from provider does not contain any {0}torrents', - 'ranked ' if self.ranked else '') - return items - - torrent_body = torrent_table.find('tbody') - torrent_rows = torrent_body.contents - del torrent_rows[1::2] - - for row in torrent_rows[1:]: - try: - torrent = row('td') - if len(torrent) <= 1: - break - - all_as = (torrent[1])('a') - notinternal = row.find('img', src='/static//common/user_upload.png') - if self.ranked and notinternal: - log.debug('Found a user uploaded release, Ignoring it..') - continue - - freeleech = row.find('img', src='/static//common/browse/freeleech.png') - if self.freeleech and not freeleech: - continue - - title = all_as[2].string - download_url = urljoin(self.url, all_as[0].attrs['href']) - if not all([title, download_url]): - continue - - seeders = try_int((row('td')[6]).text.replace(',', '')) - leechers = try_int((row('td')[7]).text.replace(',', '')) - - # Filter unseeded torrent - if seeders < min(self.minseed, 1): - if mode != 'RSS': - log.debug("Discarding torrent because it doesn't meet the" - " minimum seeders: {0}. Seeders: {1}", - title, seeders) - continue - - torrent_size = row.find('td', class_='nobr').find_next_sibling('td').string - if torrent_size: - size = convert_size(torrent_size) or -1 - - pubdate_raw = row.find('td', class_='nobr').find('span')['title'] - pubdate = self.parse_pubdate(pubdate_raw) - - item = { - 'title': title, - 'link': download_url, - 'size': size, - 'seeders': seeders, - 'leechers': leechers, - 'pubdate': pubdate, - } - if mode != 'RSS': - log.debug('Found result: {0} with {1} seeders and {2} leechers', - title, seeders, leechers) - - items.append(item) - except (AttributeError, TypeError, KeyError, ValueError, IndexError): - log.error('Failed parsing provider. 
Traceback: {0!r}', - traceback.format_exc()) - - return items - - def login(self): - """Login method used for logging in before doing search and torrent downloads.""" - if any(dict_from_cookiejar(self.session.cookies).values()): - return True - - login_params = { - 'username': self.username, - 'password': self.password, - 'keeplogged': 'on', - 'login': 'Login' - } - - # Initialize session with a GET to have cookies - self.session.get(self.urls['base_url']) - response = self.session.post(self.urls['login'], data=login_params) - if not response or not response.text: - log.warning('Unable to connect to provider') - return False - - if any([re.search('Dit brugernavn eller kodeord er forkert.', response.text), - re.search('Login :: HoundDawgs', response.text), - re.search('Dine cookies er ikke aktiveret.', response.text)], ): - log.warning('Invalid username or password. Check your settings') - return False - - return True - - -provider = HoundDawgsProvider() From 76bee9469a09d90f730af3229701ad126e40643a Mon Sep 17 00:00:00 2001 From: supergonkas Date: Sat, 30 Dec 2017 19:26:16 +0000 Subject: [PATCH 16/35] Update __init__.py --- medusa/providers/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/medusa/providers/__init__.py b/medusa/providers/__init__.py index e99feb01a5..77ced0e7a0 100644 --- a/medusa/providers/__init__.py +++ b/medusa/providers/__init__.py @@ -30,7 +30,6 @@ hdtorrents, hebits, horriblesubs, - hounddawgs, iptorrents, limetorrents, morethantv, @@ -62,7 +61,7 @@ ) __all__ = [ - 'btn', 'thepiratebay', 'torrentleech', 'hdtorrents', 'torrentday', 'hdbits', 'hounddawgs', + 'btn', 'thepiratebay', 'torrentleech', 'hdtorrents', 'torrentday', 'hdbits', 'speedcd', 'nyaa', 'torrentbytes', 'torrent9', 'morethantv', 'tokyotoshokan', 'iptorrents', 'hebits', 'alpharatio', 'sdbits', 'shazbat', 'rarbg', 'tntvillage', 'binsearch', 'xthor', 'abnormal', 'scenetime', 'nebulance', 'tvchaosuk', 'torrentproject', 'bitcannon', 'torrentz2', 'pretome', 'gftracker', 'anizb', From 2d561beffaf3853452d91e9fe1d2d1668dd7a0b3 Mon Sep 17 00:00:00 2001 From: supergonkas Date: Sat, 30 Dec 2017 19:26:40 +0000 Subject: [PATCH 17/35] Update __init__.py --- medusa/providers/torrent/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/medusa/providers/torrent/__init__.py b/medusa/providers/torrent/__init__.py index f7a690db3a..1a23c82cde 100644 --- a/medusa/providers/torrent/__init__.py +++ b/medusa/providers/torrent/__init__.py @@ -17,7 +17,6 @@ hdtorrents, hebits, horriblesubs, - hounddawgs, iptorrents, limetorrents, morethantv, @@ -62,7 +61,7 @@ __all__ = [ 'abnormal', 'alpharatio', 'animebytes', 'archetorrent', 'bithdtv', 'torrent9', 'danishbits', 'elitetorrent', - 'gftracker', 'hdspace', 'hdtorrents', 'hounddawgs', 'iptorrents', 'limetorrents', 'morethantv', + 'gftracker', 'hdspace', 'hdtorrents', 'iptorrents', 'limetorrents', 'morethantv', 'newpct', 'pretome', 'sdbits', 'scenetime', 'speedcd', 'thepiratebay', 'tntvillage', 'tokyotoshokan', 'torrentbytes', 'torrentleech', 'nebulance', 'tvchaosuk', 'xthor', 'zooqle', 'bitcannon', 'btn', 'hd4free', 'hdbits', 'norbits', 'rarbg', 'torrentday', 'torrentproject', 'nyaa', 'rsstorrent', 'shazbat', 'hebits', From 3872cde4239a136edb5b1fa68ae87a5c42c65b76 Mon Sep 17 00:00:00 2001 From: h3llrais3r Date: Sat, 30 Dec 2017 20:49:43 +0100 Subject: [PATCH 18/35] Add option to split home page in tabs (#3548) --- medusa/__main__.py | 2 + medusa/app.py | 1 + medusa/server/api/v2/config.py | 1 + medusa/server/web/config/anime.py | 
3 +- static/js/common/init.js | 7 + tests/apiv2/test_config.py | 1 + views/config_anime.mako | 9 + views/home.mako | 18 +- views/partials/home/banner.mako | 349 +++++++++++++++--------------- views/partials/home/poster.mako | 11 +- views/partials/home/simple.mako | 329 ++++++++++++++-------------- views/partials/home/small.mako | 349 +++++++++++++++--------------- 12 files changed, 571 insertions(+), 509 deletions(-) diff --git a/medusa/__main__.py b/medusa/__main__.py index 086d673836..5887258c20 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -889,6 +889,7 @@ def initialize(self, console_logging=True): app.ANIDB_PASSWORD = check_setting_str(app.CFG, 'ANIDB', 'anidb_password', '', censor_log='low') app.ANIDB_USE_MYLIST = bool(check_setting_int(app.CFG, 'ANIDB', 'anidb_use_mylist', 0)) app.ANIME_SPLIT_HOME = bool(check_setting_int(app.CFG, 'ANIME', 'anime_split_home', 0)) + app.ANIME_SPLIT_HOME_IN_TABS = bool(check_setting_int(app.CFG, 'ANIME', 'anime_split_home_in_tabs', 0)) app.METADATA_KODI = check_setting_list(app.CFG, 'General', 'metadata_kodi', ['0'] * 10, transform=int) app.METADATA_KODI_12PLUS = check_setting_list(app.CFG, 'General', 'metadata_kodi_12plus', ['0'] * 10, transform=int) @@ -1871,6 +1872,7 @@ def save_config(): new_config['ANIME'] = {} new_config['ANIME']['anime_split_home'] = int(app.ANIME_SPLIT_HOME) + new_config['ANIME']['anime_split_home_in_tabs'] = int(app.ANIME_SPLIT_HOME_IN_TABS) new_config.write() diff --git a/medusa/app.py b/medusa/app.py index c7bc37c4a7..2ad87983c1 100644 --- a/medusa/app.py +++ b/medusa/app.py @@ -425,6 +425,7 @@ ANIDB_USE_MYLIST = False ADBA_CONNECTION = None ANIME_SPLIT_HOME = False +ANIME_SPLIT_HOME_IN_TABS = False USE_SYNOINDEX = False diff --git a/medusa/server/api/v2/config.py b/medusa/server/api/v2/config.py index ac7b054c29..e4d365d942 100644 --- a/medusa/server/api/v2/config.py +++ b/medusa/server/api/v2/config.py @@ -80,6 +80,7 @@ def get(self, identifier, path_param=None): config_data = NonEmptyDict() config_data['anonRedirect'] = app.ANON_REDIRECT config_data['animeSplitHome'] = app.ANIME_SPLIT_HOME + config_data['animeSplitHomeInTabs'] = app.ANIME_SPLIT_HOME_IN_TABS config_data['comingEpsSort'] = app.COMING_EPS_SORT config_data['datePreset'] = app.DATE_PRESET config_data['fuzzyDating'] = app.FUZZY_DATING diff --git a/medusa/server/web/config/anime.py b/medusa/server/web/config/anime.py index b45589bae2..f8ac153913 100644 --- a/medusa/server/web/config/anime.py +++ b/medusa/server/web/config/anime.py @@ -32,7 +32,7 @@ def index(self): controller='config', action='anime') def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None, - split_home=None): + split_home=None, split_home_in_tabs=None): """ Save anime related settings """ @@ -44,6 +44,7 @@ def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, an app.ANIDB_PASSWORD = anidb_password app.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist) app.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home) + app.ANIME_SPLIT_HOME_IN_TABS = config.checkbox_to_value(split_home_in_tabs) app.instance.save_config() diff --git a/static/js/common/init.js b/static/js/common/init.js index e6d1ad61b9..dbe90a604c 100644 --- a/static/js/common/init.js +++ b/static/js/common/init.js @@ -2,6 +2,13 @@ MEDUSA.common.init = function() { // Import underscore.string using it's mixin export. 
_.mixin(s.exports()); + // Reset the layout for the activated tab (when using ui tabs) + $('#showTabs').tabs({ + activate: function() { + $('.show-grid').isotope('layout'); + } + }); + // Background Fanart Functions if (MEDUSA.config.fanartBackground) { var seriesId = $('#series-id').attr('value'); diff --git a/tests/apiv2/test_config.py b/tests/apiv2/test_config.py index f5f2351565..6b69771910 100644 --- a/tests/apiv2/test_config.py +++ b/tests/apiv2/test_config.py @@ -22,6 +22,7 @@ def config(monkeypatch, app_config): config_data = NonEmptyDict() config_data['anonRedirect'] = app.ANON_REDIRECT config_data['animeSplitHome'] = app.ANIME_SPLIT_HOME + config_data['animeSplitHomeInTabs'] = app.ANIME_SPLIT_HOME_IN_TABS config_data['comingEpsSort'] = app.COMING_EPS_SORT config_data['datePreset'] = app.DATE_PRESET config_data['fuzzyDating'] = app.FUZZY_DATING diff --git a/views/config_anime.mako b/views/config_anime.mako index a943a42a16..e04172ccbf 100644 --- a/views/config_anime.mako +++ b/views/config_anime.mako @@ -78,6 +78,15 @@ Separate anime and normal shows in groups +
+
+ + +
+
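For reference, the full round trip of the new flag as wired by the hunks in this patch: the checkbox on the anime config page is read by saveAnime(), persisted under the [ANIME] section, and loaded again at startup. All three lines come straight from the diffs above:

    # medusa/server/web/config/anime.py: checkbox value to app flag
    app.ANIME_SPLIT_HOME_IN_TABS = config.checkbox_to_value(split_home_in_tabs)

    # medusa/__main__.py (save_config): persisted as 0/1
    new_config['ANIME']['anime_split_home_in_tabs'] = int(app.ANIME_SPLIT_HOME_IN_TABS)

    # medusa/__main__.py (initialize): read back as a bool, defaulting to off
    app.ANIME_SPLIT_HOME_IN_TABS = bool(check_setting_int(app.CFG, 'ANIME', 'anime_split_home_in_tabs', 0))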
diff --git a/views/home.mako b/views/home.mako index 4a2b48e375..7a9b279d18 100644 --- a/views/home.mako +++ b/views/home.mako @@ -94,7 +94,23 @@
- <%include file="/partials/home/${app.HOME_LAYOUT}.mako"/> + % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: + +
+ + + +
+ <%include file="/partials/home/${app.HOME_LAYOUT}.mako"/> +
+
+ % else: + <%include file="/partials/home/${app.HOME_LAYOUT}.mako"/> + % endif
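The partials below (banner, poster, simple, small) all iterate the same show_lists structure; with both options enabled, each list is rendered in its own tab pane, and the init.js hunk above re-runs the isotope layout when a tab is activated, presumably because a pane that is hidden cannot be measured until it is shown. A hypothetical sketch of the shape the partials expect; the actual builder lives in the Home handler and is not part of this diff:

    # Illustrative only: each entry pairs a list type with its shows, read by the
    # templates as cur_show_list[0] / cur_show_list[1].
    shows, anime = [], []
    for show in app.showList:
        (anime if show.is_anime else shows).append(show)
    if app.ANIME_SPLIT_HOME:
        show_lists = [['Shows', shows], ['Anime', anime]]
    else:
        show_lists = [['Shows', shows + anime]]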
diff --git a/views/partials/home/banner.mako b/views/partials/home/banner.mako index 8cf7fdc327..1d6a6ed315 100644 --- a/views/partials/home/banner.mako +++ b/views/partials/home/banner.mako @@ -13,181 +13,188 @@ % for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if len(show_lists) > 1: -

${cur_list_type}

+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % elif len(show_lists) > 1: +

${cur_list_type}

% endif -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - % if app.show_queue_scheduler.action.loadingShowList: - - % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: - <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: - continue - %> - - - - - - - - - - % endfor - - % endif - - <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> - % for cur_show in my_show_list: - <% - cur_airs_next = '' - cur_airs_prev = '' - cur_snatched = 0 - cur_downloaded = 0 - cur_total = 0 - show_size = 0 - download_stat_tip = '' - if cur_show.indexerid in show_stat: - cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] - cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] - cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] - if not cur_snatched: +
+
Next EpPrev EpShowNetworkIndexerQualityDownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) - % if cur_loading_show.show is None: - Loading... (${cur_loading_show.show_name}) - % else: - ${cur_loading_show.show.name} - % endif -
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % if app.show_queue_scheduler.action.loadingShowList: + + % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: + <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: + continue + %> + + + + + + + + + + % endfor + + % endif + + <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> + % for cur_show in my_show_list: + <% + cur_airs_next = '' + cur_airs_prev = '' cur_snatched = 0 - cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] - if not cur_downloaded: cur_downloaded = 0 - cur_total = show_stat[cur_show.indexerid]['ep_total'] - if not cur_total: cur_total = 0 - show_size = show_stat[cur_show.indexerid]['show_size'] - download_stat = str(cur_downloaded) - download_stat_tip = "Downloaded: " + str(cur_downloaded) - if cur_snatched: - download_stat = download_stat + "+" + str(cur_snatched) - download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) - download_stat = download_stat + " / " + str(cur_total) - download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) - nom = cur_downloaded - if cur_total: - den = cur_total - else: - den = 1 - download_stat_tip = "Unaired" - progressbar_percent = nom * 100 / den - %> - - % if cur_airs_next: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - % if cur_airs_prev: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - - - + % if cur_airs_next: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - % if cur_show.externals.get('trakt_id'): - - [trakt] - + % if cur_airs_prev: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - - ${indexerApi(cur_show.indexer).name} - - - - - - - - - - % endfor - -
Next EpPrev EpShowNetworkIndexerQualityDownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) + % if cur_loading_show.show is None: + Loading... (${cur_loading_show.show_name}) + % else: + ${cur_loading_show.show.name} + % endif +
- - - - - ${cur_show.name} - - - % if cur_show.network: - ${cur_show.network} - ${cur_show.network} - % else: - No Network - No Network - % endif - - % if cur_show.imdb_id: - - [imdb] - + show_size = 0 + download_stat_tip = '' + if cur_show.indexerid in show_stat: + cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] + cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] + cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] + if not cur_snatched: + cur_snatched = 0 + cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] + if not cur_downloaded: + cur_downloaded = 0 + cur_total = show_stat[cur_show.indexerid]['ep_total'] + if not cur_total: + cur_total = 0 + show_size = show_stat[cur_show.indexerid]['show_size'] + download_stat = str(cur_downloaded) + download_stat_tip = "Downloaded: " + str(cur_downloaded) + if cur_snatched: + download_stat = download_stat + "+" + str(cur_snatched) + download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) + download_stat = download_stat + " / " + str(cur_total) + download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) + nom = cur_downloaded + if cur_total: + den = cur_total + else: + den = 1 + download_stat_tip = "Unaired" + progressbar_percent = nom * 100 / den + %> +
+ + + + ${renderQualityPill(cur_show.quality, showTitle=True)} - ## This first span is used for sorting and is never displayed to user - ${download_stat} -
- ${download_stat} -
${pretty_file_size(show_size)} - <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> - ${('No', 'Yes')[bool(paused)]} - - ${cur_show.status} - - <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> - ${('No', 'Yes')[have_xem]} -
+ + ${cur_show.name} + + + + % if cur_show.network: + ${cur_show.network} + ${cur_show.network} + % else: + No Network + No Network + % endif + + + % if cur_show.imdb_id: + + [imdb] + + % endif + % if cur_show.externals.get('trakt_id'): + + [trakt] + + % endif + + ${indexerApi(cur_show.indexer).name} + + + ${renderQualityPill(cur_show.quality, showTitle=True)} + + ## This first span is used for sorting and is never displayed to user + ${download_stat} +
+ ${download_stat} + + ${pretty_file_size(show_size)} + + <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> + ${('No', 'Yes')[bool(paused)]} + + + ${cur_show.status} + + + <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> + ${('No', 'Yes')[have_xem]} + + + % endfor + + + + % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: + + % endif % endfor diff --git a/views/partials/home/poster.mako b/views/partials/home/poster.mako index 506849d02e..a3573f48c2 100644 --- a/views/partials/home/poster.mako +++ b/views/partials/home/poster.mako @@ -14,9 +14,12 @@ % for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - + % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % endif
- % if len(show_lists) > 1: + % if not (app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS): + % if len(show_lists) > 1:
+ % endif % endif
% for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: @@ -157,5 +161,8 @@ % endfor
+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % endif % endfor diff --git a/views/partials/home/simple.mako b/views/partials/home/simple.mako index fa0f418f8a..f46ab6b4b0 100644 --- a/views/partials/home/simple.mako +++ b/views/partials/home/simple.mako @@ -10,176 +10,179 @@ import re %> <%namespace file="/inc_defs.mako" import="renderQualityPill"/> -
-
% for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if len(show_lists) > 1: -

${cur_list_type}

+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % elif len(show_lists) > 1: +

${cur_list_type}

% endif - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - % if app.show_queue_scheduler.action.loadingShowList: - - % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: - <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: - continue - %> - - - - - - - - - - - % endfor - - % endif - - <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> - % for cur_show in my_show_list: - <% - cur_airs_next = '' - cur_airs_prev = '' - cur_snatched = 0 - cur_downloaded = 0 - cur_total = 0 - show_size = 0 - download_stat_tip = '' - if cur_show.indexerid in show_stat: - cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] - cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] - cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] - if not cur_snatched: +
+
Next EpPrev EpShowNetworkIndexerQuality DownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) - % if cur_loading_show.show is None: - Loading... (${cur_loading_show.show_name}) - % else: - ${cur_loading_show.show.name} - % endif -
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % if app.show_queue_scheduler.action.loadingShowList: + + % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: + <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: + continue + %> + + + + + + + + + + + % endfor + + % endif + + <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> + % for cur_show in my_show_list: + <% + cur_airs_next = '' + cur_airs_prev = '' cur_snatched = 0 - cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] - if not cur_downloaded: cur_downloaded = 0 - cur_total = show_stat[cur_show.indexerid]['ep_total'] - if not cur_total: cur_total = 0 - show_size = show_stat[cur_show.indexerid]['show_size'] - download_stat = str(cur_downloaded) - download_stat_tip = "Downloaded: " + str(cur_downloaded) - if cur_snatched: - download_stat = download_stat + "+" + str(cur_snatched) - download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) - download_stat = download_stat + " / " + str(cur_total) - download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) - nom = cur_downloaded - if cur_total: - den = cur_total - else: - den = 1 - download_stat_tip = "Unaired" - progressbar_percent = nom * 100 / den - %> - - % if cur_airs_next: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - % if cur_airs_prev: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - - - + % if cur_airs_next: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - % if cur_show.externals.get('trakt_id'): - - [trakt] - + % if cur_airs_prev: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - - ${indexerApi(cur_show.indexer).name} - - - - - - - - - - % endfor - -
Next EpPrev EpShowNetworkIndexerQuality DownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) + % if cur_loading_show.show is None: + Loading... (${cur_loading_show.show_name}) + % else: + ${cur_loading_show.show.name} + % endif +
- - - - ${cur_show.name} - ${cur_show.network} - - % if cur_show.imdb_id: - - [imdb] - + show_size = 0 + download_stat_tip = '' + if cur_show.indexerid in show_stat: + cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] + cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] + cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] + if not cur_snatched: + cur_snatched = 0 + cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] + if not cur_downloaded: + cur_downloaded = 0 + cur_total = show_stat[cur_show.indexerid]['ep_total'] + if not cur_total: + cur_total = 0 + show_size = show_stat[cur_show.indexerid]['show_size'] + download_stat = str(cur_downloaded) + download_stat_tip = "Downloaded: " + str(cur_downloaded) + if cur_snatched: + download_stat = download_stat + "+" + str(cur_snatched) + download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) + download_stat = download_stat + " / " + str(cur_total) + download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) + nom = cur_downloaded + if cur_total: + den = cur_total + else: + den = 1 + download_stat_tip = "Unaired" + progressbar_percent = nom * 100 / den + %> +
+ + + + ${renderQualityPill(cur_show.quality, showTitle=True)} - ## This first span is used for sorting and is never displayed to user - ${download_stat} -
- ${download_stat} -
${pretty_file_size(show_size)} - <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> - ${('No', 'Yes')[bool(paused)]} - - ${cur_show.status} - - <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> - ${('No', 'Yes')[have_xem]} -
+ ${cur_show.name} + + ${cur_show.network} + + + % if cur_show.imdb_id: + + [imdb] + + % endif + % if cur_show.externals.get('trakt_id'): + + [trakt] + + % endif + + ${indexerApi(cur_show.indexer).name} + + + ${renderQualityPill(cur_show.quality, showTitle=True)} + + ## This first span is used for sorting and is never displayed to user + ${download_stat} +
+ ${download_stat} + + ${pretty_file_size(show_size)} + + <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> + ${('No', 'Yes')[bool(paused)]} + + + ${cur_show.status} + + + <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> + ${('No', 'Yes')[have_xem]} + + + % endfor + + +
+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % endif % endfor -
- diff --git a/views/partials/home/small.mako b/views/partials/home/small.mako index fcd84dfcf3..2437ca2b96 100644 --- a/views/partials/home/small.mako +++ b/views/partials/home/small.mako @@ -13,181 +13,188 @@ % for cur_show_list in show_lists: <% cur_list_type = cur_show_list[0] %> <% my_show_list = list(cur_show_list[1]) %> - % if len(show_lists) > 1: -

${cur_list_type}

+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: +
+ % elif len(show_lists) > 1: +

${cur_list_type}

% endif - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - % if app.show_queue_scheduler.action.loadingShowList: - - % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: - <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: - continue - %> - - - - - - - - - - % endfor - - % endif - - <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> - % for cur_show in my_show_list: - <% - cur_airs_next = '' - cur_airs_prev = '' - cur_snatched = 0 - cur_downloaded = 0 - cur_total = 0 - show_size = 0 - download_stat_tip = '' - if cur_show.indexerid in show_stat: - cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] - cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] - cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] - if not cur_snatched: +
+
Next EpPrev EpShowNetworkIndexerQualityDownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) - % if cur_loading_show.show is None: - Loading... (${cur_loading_show.show_name}) - % else: - ${cur_loading_show.show.name} - % endif -
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + % if app.show_queue_scheduler.action.loadingShowList: + + % for cur_loading_show in app.show_queue_scheduler.action.loadingShowList: + <% if cur_loading_show.show is not None and cur_loading_show.show in app.showList: + continue + %> + + + + + + + + + + % endfor + + % endif + + <% my_show_list.sort(lambda x, y: cmp(x.name, y.name)) %> + % for cur_show in my_show_list: + <% + cur_airs_next = '' + cur_airs_prev = '' cur_snatched = 0 - cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] - if not cur_downloaded: cur_downloaded = 0 - cur_total = show_stat[cur_show.indexerid]['ep_total'] - if not cur_total: cur_total = 0 - show_size = show_stat[cur_show.indexerid]['show_size'] - download_stat = str(cur_downloaded) - download_stat_tip = "Downloaded: " + str(cur_downloaded) - if cur_snatched: - download_stat = download_stat + "+" + str(cur_snatched) - download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) - download_stat = download_stat + " / " + str(cur_total) - download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) - nom = cur_downloaded - if cur_total: - den = cur_total - else: - den = 1 - download_stat_tip = "Unaired" - progressbar_percent = nom * 100 / den - %> - - % if cur_airs_next: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - % if cur_airs_prev: - <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> - % try: - - % except ValueError: - - % endtry - % else: - - % endif - - - + % if cur_airs_next: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_next, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - % if cur_show.externals.get('trakt_id'): - - [trakt] - + % if cur_airs_prev: + <% airDate = sbdatetime.sbdatetime.convert_to_setting(network_timezones.parse_date_time(cur_airs_prev, cur_show.airs, cur_show.network)) %> + % try: + + % except ValueError: + + % endtry + % else: + % endif - - ${indexerApi(cur_show.indexer).name} - - - - - - - - - - % endfor - -
Next EpPrev EpShowNetworkIndexerQualityDownloadsSizeActiveStatusXEM
Add ${('Show', 'Anime')[cur_list_type == 'Anime']}          
(loading) + % if cur_loading_show.show is None: + Loading... (${cur_loading_show.show_name}) + % else: + ${cur_loading_show.show.name} + % endif +
- - - - - - - % if cur_show.network: - ${cur_show.network} - ${cur_show.network} - % else: - No Network - No Network - % endif - - % if cur_show.imdb_id: - - [imdb] - + show_size = 0 + download_stat_tip = '' + if cur_show.indexerid in show_stat: + cur_airs_next = show_stat[cur_show.indexerid]['ep_airs_next'] + cur_airs_prev = show_stat[cur_show.indexerid]['ep_airs_prev'] + cur_snatched = show_stat[cur_show.indexerid]['ep_snatched'] + if not cur_snatched: + cur_snatched = 0 + cur_downloaded = show_stat[cur_show.indexerid]['ep_downloaded'] + if not cur_downloaded: + cur_downloaded = 0 + cur_total = show_stat[cur_show.indexerid]['ep_total'] + if not cur_total: + cur_total = 0 + show_size = show_stat[cur_show.indexerid]['show_size'] + download_stat = str(cur_downloaded) + download_stat_tip = "Downloaded: " + str(cur_downloaded) + if cur_snatched: + download_stat = download_stat + "+" + str(cur_snatched) + download_stat_tip = download_stat_tip + " " + "Snatched: " + str(cur_snatched) + download_stat = download_stat + " / " + str(cur_total) + download_stat_tip = download_stat_tip + " " + "Total: " + str(cur_total) + nom = cur_downloaded + if cur_total: + den = cur_total + else: + den = 1 + download_stat_tip = "Unaired" + progressbar_percent = nom * 100 / den + %> +
+ + + + ${renderQualityPill(cur_show.quality, showTitle=True)} - ## This first span is used for sorting and is never displayed to user - ${download_stat} -
- ${download_stat} -
${pretty_file_size(show_size)} - <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> - ${('No', 'Yes')[bool(paused)]} - - ${cur_show.status} - - <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> - ${('No', 'Yes')[have_xem]} -
+ + + + + % if cur_show.network: + ${cur_show.network} + ${cur_show.network} + % else: + No Network + No Network + % endif + + + % if cur_show.imdb_id: + + [imdb] + + % endif + % if cur_show.externals.get('trakt_id'): + + [trakt] + + % endif + + ${indexerApi(cur_show.indexer).name} + + + ${renderQualityPill(cur_show.quality, showTitle=True)} + + ## This first span is used for sorting and is never displayed to user + ${download_stat} +
+ ${download_stat} + + ${pretty_file_size(show_size)} + + <% paused = int(cur_show.paused) == 0 and cur_show.status == 'Continuing' %> + ${('No', 'Yes')[bool(paused)]} + + + ${cur_show.status} + + + <% have_xem = bool(get_xem_numbering_for_show(cur_show.indexerid, cur_show.indexer, refresh_data=False)) %> + ${('No', 'Yes')[have_xem]} + + + % endfor + + +
+ % if app.ANIME_SPLIT_HOME and app.ANIME_SPLIT_HOME_IN_TABS: + + % endif % endfor From e137e5c6398cbd95cf7e72d129adba5d1f41663b Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 31 Dec 2017 05:31:03 -0500 Subject: [PATCH 19/35] Fix BraceMessage KeyError (#3560) --- medusa/logger/adapters/style.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/medusa/logger/adapters/style.py b/medusa/logger/adapters/style.py index cf280f6a5e..2203709d9c 100644 --- a/medusa/logger/adapters/style.py +++ b/medusa/logger/adapters/style.py @@ -33,15 +33,15 @@ def __str__(self): args = [] kwargs = self.args[0] - msg = text_type(self.msg) - try: - return msg.format(*args, **kwargs) + return self.msg.format(*args, **kwargs) except IndexError: try: - return msg.format(kwargs) - except IndexError: - return msg + return self.msg.format(**kwargs) + except KeyError: + return self.msg + except KeyError: + return self.msg.format(*args) except Exception: log.error( 'BraceMessage string formatting failed. ' @@ -85,8 +85,9 @@ def log(self, level, msg, *args, **kwargs): """Log a message at the specified level using Brace-formatting.""" if self.isEnabledFor(level): msg, kwargs = self.process(msg, kwargs) - brace_msg = BraceMessage(msg, *args, **kwargs) - self.logger.log(level, brace_msg, **kwargs) + if not isinstance(msg, BraceMessage): + msg = BraceMessage(msg, *args, **kwargs) + self.logger.log(level, msg, **kwargs) def exception(self, msg, *args, **kwargs): """Add exception information before delegating to self.log.""" From 9622b91f886831f573582bb7a576cc806b9c93b2 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sun, 31 Dec 2017 05:32:33 -0500 Subject: [PATCH 20/35] Fix imports (#3558) --- medusa/__init__.py | 5 ++- medusa/__main__.py | 36 +++++++-------- medusa/auto_post_processor.py | 2 +- medusa/black_and_white_list.py | 2 +- medusa/classes.py | 2 +- medusa/common.py | 2 +- medusa/db.py | 7 +-- medusa/failed_history.py | 10 ++--- medusa/helper/common.py | 2 +- medusa/history.py | 6 +-- medusa/init/__init__.py | 8 ++-- medusa/naming.py | 6 +-- medusa/network_timezones.py | 9 ++-- medusa/notifiers/plex.py | 2 +- medusa/nzb_splitter.py | 10 ++--- medusa/sbdatetime.py | 4 +- medusa/scene_exceptions.py | 8 ++-- medusa/scene_numbering.py | 10 ++--- medusa/server/core.py | 32 ++++++++++--- medusa/server/web/__init__.py | 45 ++++++++++++++++--- medusa/server/web/config/__init__.py | 18 ++++---- medusa/server/web/config/anime.py | 11 +++-- medusa/server/web/config/backup_restore.py | 10 +++-- medusa/server/web/config/general.py | 22 ++++++--- medusa/server/web/config/handler.py | 10 +++-- medusa/server/web/config/notifications.py | 9 ++-- medusa/server/web/config/post_processing.py | 15 +++++-- medusa/server/web/config/search.py | 14 ++++-- medusa/server/web/config/subtitles.py | 13 ++++-- medusa/server/web/core/__init__.py | 24 +++++++--- medusa/server/web/core/authentication.py | 13 +++++- medusa/server/web/core/base.py | 33 ++++++++++---- medusa/server/web/core/calendar.py | 8 ++-- medusa/server/web/core/file_browser.py | 5 ++- medusa/server/web/core/history.py | 9 ++-- medusa/server/web/home/__init__.py | 14 +++--- medusa/server/web/home/add_recommended.py | 5 ++- medusa/server/web/home/add_shows.py | 34 +++++++------- medusa/server/web/home/change_log.py | 10 +++-- medusa/server/web/home/irc.py | 5 ++- medusa/server/web/home/news.py | 8 ++-- medusa/server/web/home/post_process.py | 11 +++-- medusa/server/web/manage/__init__.py | 4 +- medusa/server/web/manage/searches.py | 
7 +-- medusa/session/factory.py | 2 +- medusa/show/coming_episodes.py | 17 ++++--- medusa/show/history.py | 7 +-- medusa/show/recommendations/recommended.py | 7 ++- medusa/show/show.py | 17 +++++-- medusa/show_updater.py | 15 +++---- medusa/subtitles.py | 23 +++++----- medusa/torrent_checker.py | 2 +- medusa/ui.py | 4 +- tests/legacy/db_tests.py | 2 +- tests/legacy/media/show_banner_tests.py | 2 +- tests/legacy/media/show_fan_art_tests.py | 2 +- tests/legacy/media/show_network_logo_tests.py | 2 +- tests/legacy/media/show_poster_tests.py | 2 +- tests/legacy/notifier_tests.py | 2 +- tests/legacy/pp_tests.py | 2 +- tests/legacy/providers/nzb_provider_tests.py | 4 +- .../providers/torrent_provider_tests.py | 2 +- tests/legacy/scene_helpers_tests.py | 2 +- tests/legacy/snatch_tests.py | 2 +- tests/legacy/tv_tests.py | 2 +- tests/legacy/xem_tests.py | 2 +- 66 files changed, 401 insertions(+), 241 deletions(-) diff --git a/medusa/__init__.py b/medusa/__init__.py index 346391b000..5853551ea8 100644 --- a/medusa/__init__.py +++ b/medusa/__init__.py @@ -16,8 +16,9 @@ # You should have received a copy of the GNU General Public License # along with Medusa. If not, see . # pylint: disable=too-many-lines -from . import app -from .init import initialize + +from medusa import app +from medusa.init import initialize # Initialize functions replacements initialize() diff --git a/medusa/__main__.py b/medusa/__main__.py index 5887258c20..4942e03171 100755 --- a/medusa/__main__.py +++ b/medusa/__main__.py @@ -62,31 +62,31 @@ from configobj import ConfigObj -from six import text_type - -from . import ( +from medusa import ( app, auto_post_processor, cache, db, event_queue, exception_handler, helpers, logger as app_logger, metadata, name_cache, naming, network_timezones, providers, scheduler, show_queue, show_updater, subtitles, torrent_checker, trakt_checker, version_checker ) -from .common import SD, SKIPPED, WANTED -from .config import ( +from medusa.common import SD, SKIPPED, WANTED +from medusa.config import ( CheckSection, ConfigMigrator, check_setting_bool, check_setting_float, check_setting_int, check_setting_list, check_setting_str, load_provider_setting, save_provider_setting ) -from .databases import cache_db, failed_db, main_db -from .event_queue import Events -from .indexers.indexer_config import INDEXER_TVDBV2, INDEXER_TVMAZE -from .providers.generic_provider import GenericProvider -from .providers.nzb.newznab import NewznabProvider -from .providers.torrent.rss.rsstorrent import TorrentRssProvider -from .search.backlog import BacklogSearchScheduler, BacklogSearcher -from .search.daily import DailySearcher -from .search.proper import ProperFinder -from .search.queue import ForcedSearchQueue, SearchQueue, SnatchQueue -from .server.core import AppWebServer -from .system.shutdown import Shutdown -from .tv import Series +from medusa.databases import cache_db, failed_db, main_db +from medusa.event_queue import Events +from medusa.indexers.indexer_config import INDEXER_TVDBV2, INDEXER_TVMAZE +from medusa.providers.generic_provider import GenericProvider +from medusa.providers.nzb.newznab import NewznabProvider +from medusa.providers.torrent.rss.rsstorrent import TorrentRssProvider +from medusa.search.backlog import BacklogSearchScheduler, BacklogSearcher +from medusa.search.daily import DailySearcher +from medusa.search.proper import ProperFinder +from medusa.search.queue import ForcedSearchQueue, SearchQueue, SnatchQueue +from medusa.server.core import AppWebServer +from medusa.system.shutdown import 
Shutdown +from medusa.tv import Series + +from six import text_type logger = logging.getLogger(__name__) diff --git a/medusa/auto_post_processor.py b/medusa/auto_post_processor.py index 683592d9a1..429fd4817c 100644 --- a/medusa/auto_post_processor.py +++ b/medusa/auto_post_processor.py @@ -21,7 +21,7 @@ import logging import os.path -from . import app +from medusa import app logger = logging.getLogger(__name__) diff --git a/medusa/black_and_white_list.py b/medusa/black_and_white_list.py index 9b5c8b5a98..5b165b73c4 100644 --- a/medusa/black_and_white_list.py +++ b/medusa/black_and_white_list.py @@ -24,7 +24,7 @@ import logging from adba.aniDBerrors import AniDBCommandTimeoutError -from . import app, db, helpers +from medusa import app, db, helpers logger = logging.getLogger(__name__) diff --git a/medusa/classes.py b/medusa/classes.py index a29c5551a2..a23f7b8739 100644 --- a/medusa/classes.py +++ b/medusa/classes.py @@ -232,7 +232,7 @@ def __init__(self, config, log=None): self.log = log def select_series(self, all_series): - from .helper.common import dateTimeFormat + from medusa.helper.common import dateTimeFormat search_results = [] series_names = [] diff --git a/medusa/common.py b/medusa/common.py index 3db15cd1f6..a4c8f67a59 100644 --- a/medusa/common.py +++ b/medusa/common.py @@ -311,7 +311,7 @@ def scene_quality(name, anime=False): :param anime: Boolean to indicate if the show we're resolving is Anime :return: Quality """ - from .tagger.episode import EpisodeTags + from medusa.tagger.episode import EpisodeTags if not name: return Quality.UNKNOWN else: diff --git a/medusa/db.py b/medusa/db.py index 0e24ce8a72..e49fc758b9 100644 --- a/medusa/db.py +++ b/medusa/db.py @@ -24,9 +24,10 @@ import time import warnings +from medusa import app, logger +from medusa.helper.exceptions import ex + from six import text_type -from . import app, logger -from .helper.exceptions import ex db_cons = {} db_locks = {} @@ -445,7 +446,7 @@ def restoreDatabase(version): :param version: Version to restore to :return: True if restore succeeds, False if it fails """ - from . import helpers + from medusa import helpers logger.log(u"Restoring database before trying upgrade again") if not helpers.restore_versioned_file(dbFilename(suffix='v' + str(version)), version): logger.log_error_and_exit(u"Database restore failed, abort upgrading database") diff --git a/medusa/failed_history.py b/medusa/failed_history.py index 4821b71e3d..8c75f290ee 100644 --- a/medusa/failed_history.py +++ b/medusa/failed_history.py @@ -19,11 +19,11 @@ """failed history code.""" import re from datetime import datetime, timedelta -from . import db, logger -from .common import FAILED, Quality, WANTED, statusStrings -from .helper.common import episode_num -from .helper.exceptions import EpisodeNotFoundException -from .show.history import History +from medusa import db, logger +from medusa.common import FAILED, Quality, WANTED, statusStrings +from medusa.helper.common import episode_num +from medusa.helper.exceptions import EpisodeNotFoundException +from medusa.show.history import History def prepare_failed_name(release): diff --git a/medusa/helper/common.py b/medusa/helper/common.py index d6de44e425..fc1b758b79 100644 --- a/medusa/helper/common.py +++ b/medusa/helper/common.py @@ -346,7 +346,7 @@ def enabled_providers(search_type): """ Return providers based on search type: daily, backlog and manualsearch """ - from .. 
import providers + from medusa import providers return [x for x in providers.sorted_provider_list(app.RANDOMIZE_PROVIDERS) if x.is_active() and x.get_id() not in app.BROKEN_PROVIDERS and hasattr(x, 'enable_{}'.format(search_type)) and diff --git a/medusa/history.py b/medusa/history.py index 3de4066dde..48803e6e92 100644 --- a/medusa/history.py +++ b/medusa/history.py @@ -20,9 +20,9 @@ import datetime import db -from .common import FAILED, Quality, SNATCHED, SUBTITLED -from .helper.encoding import ss -from .show.history import History +from medusa.common import FAILED, Quality, SNATCHED, SUBTITLED +from medusa.helper.encoding import ss +from medusa.show.history import History def _logHistoryItem(action, showid, season, episode, quality, resource, diff --git a/medusa/init/__init__.py b/medusa/init/__init__.py index a62cad1966..dca2a85d82 100644 --- a/medusa/init/__init__.py +++ b/medusa/init/__init__.py @@ -53,12 +53,12 @@ def _register_utf8_codec(): def _monkey_patch_fs_functions(): - from . import filesystem + from medusa.init import filesystem filesystem.initialize() def _monkey_patch_logging_functions(): - from . import logconfig + from medusa.init import logconfig logconfig.initialize() @@ -122,7 +122,9 @@ def _strptime_workaround(): def _configure_guessit(): """Replace guessit with a pre-configured one, so guessit.guessit() could be called directly in any place.""" import guessit - from ..name_parser.guessit_parser import guessit as pre_configured_guessit + from medusa.name_parser.guessit_parser import ( + guessit as pre_configured_guessit, + ) guessit.guessit = pre_configured_guessit diff --git a/medusa/naming.py b/medusa/naming.py index 3132cc91b1..7eb423b2f7 100644 --- a/medusa/naming.py +++ b/medusa/naming.py @@ -19,9 +19,9 @@ import datetime import os -from . import app, common, logger, tv -from .common import DOWNLOADED, Quality -from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser +from medusa import app, common, logger, tv +from medusa.common import DOWNLOADED, Quality +from medusa.name_parser.parser import InvalidNameException, InvalidShowException, NameParser name_presets = ( '%SN - %Sx%0E - %EN', diff --git a/medusa/network_timezones.py b/medusa/network_timezones.py index d33164f99f..9bfb6831f0 100644 --- a/medusa/network_timezones.py +++ b/medusa/network_timezones.py @@ -22,11 +22,12 @@ from app import BASE_PYMEDUSA_URL from dateutil import tz -from six import iteritems -from . import db, logger -from .helper.common import try_int -from .session.core import MedusaSafeSession +from medusa import db, logger +from medusa.helper.common import try_int +from medusa.session.core import MedusaSafeSession + +from six import iteritems try: app_timezone = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal() diff --git a/medusa/notifiers/plex.py b/medusa/notifiers/plex.py index ad7de898c7..634e6147d1 100644 --- a/medusa/notifiers/plex.py +++ b/medusa/notifiers/plex.py @@ -48,7 +48,7 @@ def _notify_pht(message, title='Medusa', host=None, username=None, password=None The result will either be 'OK' or False, this is used to be parsed by the calling function. """ - from . 
import kodi_notifier + from medusa.notifiers import kodi_notifier # suppress notifications if the notifier is disabled but the notify options are checked if not app.USE_PLEX_CLIENT and not force: return False diff --git a/medusa/nzb_splitter.py b/medusa/nzb_splitter.py index 35b6c85ccd..f10c4924ac 100644 --- a/medusa/nzb_splitter.py +++ b/medusa/nzb_splitter.py @@ -21,11 +21,11 @@ import re -from . import classes, logger -from .helper.encoding import ss -from .helper.exceptions import ex -from .name_parser.parser import InvalidNameException, InvalidShowException, NameParser -from .session.core import MedusaSession +from medusa import classes, logger +from medusa.helper.encoding import ss +from medusa.helper.exceptions import ex +from medusa.name_parser.parser import InvalidNameException, InvalidShowException, NameParser +from medusa.session.core import MedusaSession try: import xml.etree.cElementTree as ETree diff --git a/medusa/sbdatetime.py b/medusa/sbdatetime.py index 3799ffc536..8fd62dd488 100644 --- a/medusa/sbdatetime.py +++ b/medusa/sbdatetime.py @@ -20,8 +20,8 @@ import functools import locale -from . import app -from .network_timezones import app_timezone +from medusa import app +from medusa.network_timezones import app_timezone date_presets = ( '%Y-%m-%d', diff --git a/medusa/scene_exceptions.py b/medusa/scene_exceptions.py index b9e41a59c8..72a6f05fa7 100644 --- a/medusa/scene_exceptions.py +++ b/medusa/scene_exceptions.py @@ -11,11 +11,13 @@ from collections import defaultdict import adba + +from medusa import app, db, helpers from medusa.indexers.indexer_api import indexerApi +from medusa.indexers.indexer_config import INDEXER_TVDBV2 +from medusa.session.core import MedusaSafeSession + from six import iteritems -from . import app, db, helpers -from .indexers.indexer_config import INDEXER_TVDBV2 -from .session.core import MedusaSafeSession logger = logging.getLogger(__name__) diff --git a/medusa/scene_numbering.py b/medusa/scene_numbering.py index 9e84a35e53..46ab662eab 100644 --- a/medusa/scene_numbering.py +++ b/medusa/scene_numbering.py @@ -25,11 +25,11 @@ import time import traceback -from . 
import app, db, logger -from .helper.exceptions import ex -from .indexers.indexer_api import indexerApi -from .scene_exceptions import safe_session -from .show.show import Show +from medusa import app, db, logger +from medusa.helper.exceptions import ex +from medusa.indexers.indexer_api import indexerApi +from medusa.scene_exceptions import safe_session +from medusa.show.show import Show def get_scene_numbering(indexer_id, indexer, season, episode, fallback_to_xem=True): diff --git a/medusa/server/core.py b/medusa/server/core.py index 914ea2092d..88bbd8af45 100644 --- a/medusa/server/core.py +++ b/medusa/server/core.py @@ -5,6 +5,15 @@ import os import threading +from medusa import ( + app, + logger, +) +from medusa.helpers import ( + create_https_certificates, + generate_api_key, +) +from medusa.server.api.v1.core import ApiHandler from medusa.server.api.v2.alias import AliasHandler from medusa.server.api.v2.alias_source import ( AliasSourceHandler, @@ -19,16 +28,25 @@ from medusa.server.api.v2.series_asset import SeriesAssetHandler from medusa.server.api.v2.series_legacy import SeriesLegacyHandler from medusa.server.api.v2.series_operation import SeriesOperationHandler +from medusa.server.web import ( + CalendarHandler, + KeyHandler, + LoginHandler, + LogoutHandler, + TokenHandler, +) +from medusa.server.web.core.base import AuthenticatedStaticFileHandler +from medusa.ws import MedusaWebSocketHandler + from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop -from tornado.web import Application, RedirectHandler, StaticFileHandler, url +from tornado.web import ( + Application, + RedirectHandler, + StaticFileHandler, + url, +) from tornroutes import route -from .api.v1.core import ApiHandler -from .web import CalendarHandler, KeyHandler, LoginHandler, LogoutHandler, TokenHandler -from .web.core.base import AuthenticatedStaticFileHandler -from .. 
import app, logger -from ..helpers import create_https_certificates, generate_api_key -from ..ws import MedusaWebSocketHandler def get_apiv2_handlers(base): diff --git a/medusa/server/web/__init__.py b/medusa/server/web/__init__.py index a589cb56bf..2a97b3facf 100644 --- a/medusa/server/web/__init__.py +++ b/medusa/server/web/__init__.py @@ -1,11 +1,44 @@ # coding=utf-8 -from .config import Config, ConfigAnime, ConfigBackupRestore, ConfigGeneral, ConfigNotifications, ConfigPostProcessing, ConfigProviders, \ - ConfigSearch, ConfigSubtitles -from .core import BaseHandler, CalendarHandler, ErrorLogs, History, KeyHandler, LoginHandler, LogoutHandler, PageTemplate, TokenHandler, UI, \ - WebFileBrowser, WebHandler, WebRoot, get_lookup, mako_cache, mako_lookup, mako_path -from .home import Home, HomeAddShows, HomeChangeLog, HomeIRC, HomeNews, HomePostProcess -from .manage import ( +from medusa.server.web.config import ( + Config, + ConfigAnime, + ConfigBackupRestore, + ConfigGeneral, + ConfigNotifications, + ConfigPostProcessing, + ConfigProviders, + ConfigSearch, + ConfigSubtitles, +) +from medusa.server.web.core import ( + BaseHandler, + CalendarHandler, + ErrorLogs, + History, + KeyHandler, + LoginHandler, + LogoutHandler, + PageTemplate, + TokenHandler, + UI, + WebFileBrowser, + WebHandler, + WebRoot, + get_lookup, + mako_cache, + mako_lookup, + mako_path, +) +from medusa.server.web.home import ( + Home, + HomeAddShows, + HomeChangeLog, + HomeIRC, + HomeNews, + HomePostProcess, +) +from medusa.server.web.manage import ( Manage, ManageSearches, ) diff --git a/medusa/server/web/config/__init__.py b/medusa/server/web/config/__init__.py index b6eb23cf28..e4cf08c6c6 100644 --- a/medusa/server/web/config/__init__.py +++ b/medusa/server/web/config/__init__.py @@ -1,11 +1,11 @@ # coding=utf-8 -from .anime import ConfigAnime -from .backup_restore import ConfigBackupRestore -from .general import ConfigGeneral -from .handler import Config -from .notifications import ConfigNotifications -from .post_processing import ConfigPostProcessing -from .providers import ConfigProviders -from .search import ConfigSearch -from .subtitles import ConfigSubtitles +from medusa.server.web.config.anime import ConfigAnime +from medusa.server.web.config.backup_restore import ConfigBackupRestore +from medusa.server.web.config.general import ConfigGeneral +from medusa.server.web.config.handler import Config +from medusa.server.web.config.notifications import ConfigNotifications +from medusa.server.web.config.post_processing import ConfigPostProcessing +from medusa.server.web.config.providers import ConfigProviders +from medusa.server.web.config.search import ConfigSearch +from medusa.server.web.config.subtitles import ConfigSubtitles diff --git a/medusa/server/web/config/anime.py b/medusa/server/web/config/anime.py index f8ac153913..b8e3c54dc6 100644 --- a/medusa/server/web/config/anime.py +++ b/medusa/server/web/config/anime.py @@ -6,10 +6,15 @@ import os +from medusa import ( + app, + config, + logger, + ui, +) +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate from tornroutes import route -from .handler import Config -from ..core import PageTemplate -from .... 
import app, config, logger, ui @route('/config/anime(/?.*)') diff --git a/medusa/server/web/config/backup_restore.py b/medusa/server/web/config/backup_restore.py index f20f3b63fd..daeab72f5e 100644 --- a/medusa/server/web/config/backup_restore.py +++ b/medusa/server/web/config/backup_restore.py @@ -5,10 +5,14 @@ import os import time +from medusa import ( + app, + helpers, +) +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate + from tornroutes import route -from .handler import Config -from ..core import PageTemplate -from .... import app, helpers @route('/config/backuprestore(/?.*)') diff --git a/medusa/server/web/config/general.py b/medusa/server/web/config/general.py index ba61fd4386..c5f6157db6 100644 --- a/medusa/server/web/config/general.py +++ b/medusa/server/web/config/general.py @@ -6,13 +6,23 @@ from github import GithubException -from tornroutes import route +from medusa import ( + app, + config, + github_client, + helpers, + logger, + ui, +) +from medusa.common import ( + Quality, + WANTED, +) +from medusa.helper.common import try_int +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate -from .handler import Config -from ..core import PageTemplate -from .... import app, config, github_client, helpers, logger, ui -from ....common import Quality, WANTED -from ....helper.common import try_int +from tornroutes import route @route('/config/general(/?.*)') diff --git a/medusa/server/web/config/handler.py b/medusa/server/web/config/handler.py index 5a582c1f2a..4b4db211fb 100644 --- a/medusa/server/web/config/handler.py +++ b/medusa/server/web/config/handler.py @@ -6,10 +6,14 @@ import os +from medusa import ( + app, + db, +) +from medusa.server.web.core import PageTemplate, WebRoot +from medusa.version_checker import CheckVersion + from tornroutes import route -from ..core import PageTemplate, WebRoot -from .... import app, db -from ....version_checker import CheckVersion @route('/config(/?.*)') diff --git a/medusa/server/web/config/notifications.py b/medusa/server/web/config/notifications.py index aec65eb394..fd8e87d273 100644 --- a/medusa/server/web/config/notifications.py +++ b/medusa/server/web/config/notifications.py @@ -6,11 +6,12 @@ import os +from medusa import app, config, logger, ui +from medusa.helper.common import try_int +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate + from tornroutes import route -from .handler import Config -from ..core import PageTemplate -from .... import app, config, logger, ui -from ....helper.common import try_int @route('/config/notifications(/?.*)') diff --git a/medusa/server/web/config/post_processing.py b/medusa/server/web/config/post_processing.py index 6f4b1329bd..265aa5dfef 100644 --- a/medusa/server/web/config/post_processing.py +++ b/medusa/server/web/config/post_processing.py @@ -6,12 +6,19 @@ import os +from medusa import ( + app, + config, + logger, + naming, + ui, +) +from medusa.helper.exceptions import ex +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate + from tornroutes import route from unrar2 import RarFile -from .handler import Config -from ..core import PageTemplate -from .... 
import app, config, logger, naming, ui -from ....helper.exceptions import ex @route('/config/postProcessing(/?.*)') diff --git a/medusa/server/web/config/search.py b/medusa/server/web/config/search.py index 3e2e6cc366..163c89fa01 100644 --- a/medusa/server/web/config/search.py +++ b/medusa/server/web/config/search.py @@ -6,11 +6,17 @@ import os +from medusa import ( + app, + config, + logger, + ui, +) +from medusa.helper.common import try_int +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate + from tornroutes import route -from .handler import Config -from ..core import PageTemplate -from .... import app, config, logger, ui -from ....helper.common import try_int @route('/config/search(/?.*)') diff --git a/medusa/server/web/config/subtitles.py b/medusa/server/web/config/subtitles.py index be3a665a40..f675070c1b 100644 --- a/medusa/server/web/config/subtitles.py +++ b/medusa/server/web/config/subtitles.py @@ -6,10 +6,17 @@ import os +from medusa import ( + app, + config, + logger, + subtitles, + ui, +) +from medusa.server.web.config.handler import Config +from medusa.server.web.core import PageTemplate + from tornroutes import route -from .handler import Config -from ..core import PageTemplate -from .... import app, config, logger, subtitles, ui @route('/config/subtitles(/?.*)') diff --git a/medusa/server/web/core/__init__.py b/medusa/server/web/core/__init__.py index 4ac3748650..712475cffd 100644 --- a/medusa/server/web/core/__init__.py +++ b/medusa/server/web/core/__init__.py @@ -1,13 +1,23 @@ # coding=utf-8 -from .authentication import ( +from medusa.server.web.core.authentication import ( KeyHandler, LoginHandler, LogoutHandler, ) -from .base import BaseHandler, PageTemplate, UI, WebHandler, WebRoot, get_lookup, mako_cache, mako_lookup, mako_path -from .calendar import CalendarHandler -from .error_logs import ErrorLogs -from .file_browser import WebFileBrowser -from .history import History -from .token import TokenHandler +from medusa.server.web.core.base import ( + BaseHandler, + PageTemplate, + UI, + WebHandler, + WebRoot, + get_lookup, + mako_cache, + mako_lookup, + mako_path, +) +from medusa.server.web.core.calendar import CalendarHandler +from medusa.server.web.core.error_logs import ErrorLogs +from medusa.server.web.core.file_browser import WebFileBrowser +from medusa.server.web.core.history import History +from medusa.server.web.core.token import TokenHandler diff --git a/medusa/server/web/core/authentication.py b/medusa/server/web/core/authentication.py index a06c4b0fe0..3765555a64 100644 --- a/medusa/server/web/core/authentication.py +++ b/medusa/server/web/core/authentication.py @@ -9,9 +9,18 @@ import traceback +from medusa import ( + app, + helpers, + logger, + notifiers, +) +from medusa.server.web.core.base import ( + BaseHandler, + PageTemplate, +) + from tornado.web import RequestHandler -from .base import BaseHandler, PageTemplate -from .... 
import app, helpers, logger, notifiers class KeyHandler(RequestHandler): diff --git a/medusa/server/web/core/base.py b/medusa/server/web/core/base.py index 1f511babda..34547b87d7 100644 --- a/medusa/server/web/core/base.py +++ b/medusa/server/web/core/base.py @@ -15,23 +15,40 @@ from mako.runtime import UNDEFINED from mako.template import Template as MakoTemplate -from requests.compat import urljoin +from medusa import ( + app, + classes, + db, + exception_handler, + helpers, + logger, + network_timezones, + ui, +) +from medusa.server.api.v1.core import function_mapper +from medusa.show.coming_episodes import ComingEpisodes -from six import binary_type, iteritems, text_type +from requests.compat import urljoin +from six import ( + binary_type, + iteritems, + text_type, +) from tornado.concurrent import run_on_executor from tornado.escape import utf8 from tornado.gen import coroutine from tornado.ioloop import IOLoop from tornado.process import cpu_count -from tornado.web import HTTPError, RequestHandler, StaticFileHandler, addslash, authenticated - +from tornado.web import ( + HTTPError, + RequestHandler, + StaticFileHandler, + addslash, + authenticated, +) from tornroutes import route -from ...api.v1.core import function_mapper -from .... import app, classes, db, exception_handler, helpers, logger, network_timezones, ui -from ....show.coming_episodes import ComingEpisodes - mako_lookup = None mako_cache = None diff --git a/medusa/server/web/core/calendar.py b/medusa/server/web/core/calendar.py index 1d34f77913..befdef9ca7 100644 --- a/medusa/server/web/core/calendar.py +++ b/medusa/server/web/core/calendar.py @@ -10,10 +10,12 @@ import datetime from dateutil import tz + +from medusa import app, db, logger, network_timezones +from medusa.helper.common import try_int +from medusa.server.web.core.base import BaseHandler + from tornado.web import authenticated -from .base import BaseHandler -from .... import app, db, logger, network_timezones -from ....helper.common import try_int class CalendarHandler(BaseHandler): diff --git a/medusa/server/web/core/file_browser.py b/medusa/server/web/core/file_browser.py index 4bb69bcab9..fc26a86795 100644 --- a/medusa/server/web/core/file_browser.py +++ b/medusa/server/web/core/file_browser.py @@ -5,9 +5,10 @@ import json import os +from medusa.browser import list_folders +from medusa.server.web.core.base import WebRoot + from tornroutes import route -from .base import WebRoot -from ....browser import list_folders @route('/browser(/?.*)') diff --git a/medusa/server/web/core/history.py b/medusa/server/web/core/history.py index 520d24330d..b46de1dd41 100644 --- a/medusa/server/web/core/history.py +++ b/medusa/server/web/core/history.py @@ -2,11 +2,12 @@ from __future__ import unicode_literals +from medusa import app, ui +from medusa.helper.common import try_int +from medusa.server.web.core.base import PageTemplate, WebRoot +from medusa.show.history import History as HistoryTool + from tornroutes import route -from .base import PageTemplate, WebRoot -from .... 
import app, ui -from ....helper.common import try_int -from ....show.history import History as HistoryTool @route('/history(/?.*)') diff --git a/medusa/server/web/home/__init__.py b/medusa/server/web/home/__init__.py index 033518da03..62043fa853 100644 --- a/medusa/server/web/home/__init__.py +++ b/medusa/server/web/home/__init__.py @@ -1,9 +1,9 @@ # coding=utf-8 -from .add_recommended import HomeAddRecommended -from .add_shows import HomeAddShows -from .change_log import HomeChangeLog -from .handler import Home -from .irc import HomeIRC -from .news import HomeNews -from .post_process import HomePostProcess +from medusa.server.web.home.add_recommended import HomeAddRecommended +from medusa.server.web.home.add_shows import HomeAddShows +from medusa.server.web.home.change_log import HomeChangeLog +from medusa.server.web.home.handler import Home +from medusa.server.web.home.irc import HomeIRC +from medusa.server.web.home.news import HomeNews +from medusa.server.web.home.post_process import HomePostProcess diff --git a/medusa/server/web/home/add_recommended.py b/medusa/server/web/home/add_recommended.py index ad41d22eda..a236f9b3ee 100644 --- a/medusa/server/web/home/add_recommended.py +++ b/medusa/server/web/home/add_recommended.py @@ -17,9 +17,10 @@ from __future__ import unicode_literals +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home + from tornroutes import route -from .handler import Home -from ..core import PageTemplate @route('/addRecommended(/?.*)') diff --git a/medusa/server/web/home/add_shows.py b/medusa/server/web/home/add_shows.py index 51761e3b0f..0595eb4f77 100644 --- a/medusa/server/web/home/add_shows.py +++ b/medusa/server/web/home/add_shows.py @@ -7,32 +7,28 @@ import os import re +from medusa import app, classes, config, db, helpers, logger, ui +from medusa.black_and_white_list import short_group_names +from medusa.common import Quality +from medusa.helper.common import sanitize_filename, try_int +from medusa.helpers import get_showname_from_indexer +from medusa.indexers.indexer_api import indexerApi +from medusa.indexers.indexer_config import INDEXER_TVDBV2 +from medusa.indexers.indexer_exceptions import IndexerException, IndexerUnavailable +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home +from medusa.show.recommendations.anidb import AnidbPopular +from medusa.show.recommendations.imdb import ImdbPopular +from medusa.show.recommendations.trakt import TraktPopular +from medusa.show.show import Show + from requests import RequestException from requests.compat import unquote_plus - from simpleanidb import REQUEST_HOT - from six import iteritems - from tornroutes import route - from traktor import TraktApi -from .handler import Home -from ..core import PageTemplate -from .... 
import app, classes, config, db, helpers, logger, ui -from ....black_and_white_list import short_group_names -from ....common import Quality -from ....helper.common import sanitize_filename, try_int -from ....helpers import get_showname_from_indexer -from ....indexers.indexer_api import indexerApi -from ....indexers.indexer_config import INDEXER_TVDBV2 -from ....indexers.indexer_exceptions import IndexerException, IndexerUnavailable -from ....show.recommendations.anidb import AnidbPopular -from ....show.recommendations.imdb import ImdbPopular -from ....show.recommendations.trakt import TraktPopular -from ....show.show import Show - @route('/addShows(/?.*)') class HomeAddShows(Home): diff --git a/medusa/server/web/home/change_log.py b/medusa/server/web/home/change_log.py index 699b456f70..9d78a94432 100644 --- a/medusa/server/web/home/change_log.py +++ b/medusa/server/web/home/change_log.py @@ -3,11 +3,13 @@ from __future__ import unicode_literals import markdown2 + +from medusa import app, logger +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home +from medusa.session.core import MedusaSafeSession + from tornroutes import route -from .handler import Home -from ..core import PageTemplate -from .... import app, logger -from ....session.core import MedusaSafeSession @route('/changes(/?.*)') diff --git a/medusa/server/web/home/irc.py b/medusa/server/web/home/irc.py index cb9e885278..71d1f64919 100644 --- a/medusa/server/web/home/irc.py +++ b/medusa/server/web/home/irc.py @@ -2,9 +2,10 @@ from __future__ import unicode_literals +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home + from tornroutes import route -from .handler import Home -from ..core import PageTemplate @route('/IRC(/?.*)') diff --git a/medusa/server/web/home/news.py b/medusa/server/web/home/news.py index e72398fab4..c979eb376a 100644 --- a/medusa/server/web/home/news.py +++ b/medusa/server/web/home/news.py @@ -3,10 +3,12 @@ from __future__ import unicode_literals import markdown2 + +from medusa import app, logger +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home + from tornroutes import route -from .handler import Home -from ..core import PageTemplate -from .... import app, logger @route('/news(/?.*)') diff --git a/medusa/server/web/home/post_process.py b/medusa/server/web/home/post_process.py index 95296a5bcb..c23dddf076 100644 --- a/medusa/server/web/home/post_process.py +++ b/medusa/server/web/home/post_process.py @@ -2,15 +2,14 @@ from __future__ import unicode_literals -from six import string_types +from medusa import process_tv +from medusa.helper.encoding import ss +from medusa.server.web.core import PageTemplate +from medusa.server.web.home.handler import Home +from six import string_types from tornroutes import route -from .handler import Home -from ..core import PageTemplate -from .... 
import process_tv -from ....helper.encoding import ss - @route('/home/postprocess(/?.*)') class HomePostProcess(Home): diff --git a/medusa/server/web/manage/__init__.py b/medusa/server/web/manage/__init__.py index a105b76623..b3ae0e2a49 100644 --- a/medusa/server/web/manage/__init__.py +++ b/medusa/server/web/manage/__init__.py @@ -1,4 +1,4 @@ # coding=utf-8 -from .handler import Manage -from .searches import ManageSearches +from medusa.server.web.manage.handler import Manage +from medusa.server.web.manage.searches import ManageSearches diff --git a/medusa/server/web/manage/searches.py b/medusa/server/web/manage/searches.py index 1b590aeb93..ae4eef4a82 100644 --- a/medusa/server/web/manage/searches.py +++ b/medusa/server/web/manage/searches.py @@ -2,10 +2,11 @@ from __future__ import unicode_literals +from medusa import app, logger, ui +from medusa.server.web.core import PageTemplate +from medusa.server.web.manage.handler import Manage + from tornroutes import route -from .handler import Manage -from ..core import PageTemplate -from .... import app, logger, ui @route('/manage/manageSearches(/?.*)') diff --git a/medusa/session/factory.py b/medusa/session/factory.py index 804c1f3329..18d17f5509 100644 --- a/medusa/session/factory.py +++ b/medusa/session/factory.py @@ -4,7 +4,7 @@ from cachecontrol import CacheControlAdapter from cachecontrol.cache import DictCache -from .. import app +from medusa import app log = logging.getLogger(__name__) log.addHandler(logging.NullHandler()) diff --git a/medusa/show/coming_episodes.py b/medusa/show/coming_episodes.py index 19b3b9e449..da0f0b28e3 100644 --- a/medusa/show/coming_episodes.py +++ b/medusa/show/coming_episodes.py @@ -18,14 +18,19 @@ from datetime import date, timedelta +from medusa import app +from medusa.common import ( + IGNORED, + Quality, + UNAIRED, + WANTED, +) +from medusa.db import DBConnection +from medusa.helper.common import dateFormat, timeFormat from medusa.helpers.quality import get_quality_string +from medusa.network_timezones import parse_date_time +from medusa.sbdatetime import sbdatetime from medusa.tv.series import SeriesIdentifier -from .. import app -from ..common import IGNORED, Quality, UNAIRED, WANTED -from ..db import DBConnection -from ..helper.common import dateFormat, timeFormat -from ..network_timezones import parse_date_time -from ..sbdatetime import sbdatetime class ComingEpisodes(object): diff --git a/medusa/show/history.py b/medusa/show/history.py index 83513f6277..cbd956477e 100644 --- a/medusa/show/history.py +++ b/medusa/show/history.py @@ -18,16 +18,17 @@ from collections import namedtuple from datetime import datetime, timedelta +from medusa.common import Quality +from medusa.helper.common import try_int + from six import text_type -from ..common import Quality -from ..helper.common import try_int class History(object): date_format = '%Y%m%d%H%M%S' def __init__(self): - from ..db import DBConnection + from medusa.db import DBConnection self.db = DBConnection() def clear(self): diff --git a/medusa/show/recommendations/recommended.py b/medusa/show/recommendations/recommended.py index e5bbcee1bf..f5e878207c 100644 --- a/medusa/show/recommendations/recommended.py +++ b/medusa/show/recommendations/recommended.py @@ -20,8 +20,11 @@ import os import posixpath -from ... 
import app, helpers -from ...session.core import MedusaSession +from medusa import ( + app, + helpers, +) +from medusa.session.core import MedusaSession class MissingTvdbMapping(Exception): diff --git a/medusa/show/show.py b/medusa/show/show.py index dba7a269fb..650a57ef4f 100644 --- a/medusa/show/show.py +++ b/medusa/show/show.py @@ -18,10 +18,19 @@ from datetime import date -from .. import app -from ..common import Quality, SKIPPED, WANTED -from ..db import DBConnection -from ..helper.exceptions import CantRefreshShowException, CantRemoveShowException, MultipleShowObjectsException, ex +from medusa import app +from medusa.common import ( + Quality, + SKIPPED, + WANTED, +) +from medusa.db import DBConnection +from medusa.helper.exceptions import ( + CantRefreshShowException, + CantRemoveShowException, + MultipleShowObjectsException, + ex, +) class Show(object): diff --git a/medusa/show_updater.py b/medusa/show_updater.py index b49000dccb..bf05510868 100644 --- a/medusa/show_updater.py +++ b/medusa/show_updater.py @@ -20,15 +20,14 @@ import threading import time -import app -from requests.exceptions import HTTPError +from medusa import app, db, network_timezones, ui +from medusa.helper.exceptions import CantRefreshShowException, CantUpdateShowException +from medusa.indexers.indexer_api import indexerApi +from medusa.indexers.indexer_exceptions import IndexerException, IndexerUnavailable +from medusa.scene_exceptions import refresh_exceptions_cache +from medusa.session.core import MedusaSession -from . import db, network_timezones, ui -from .helper.exceptions import CantRefreshShowException, CantUpdateShowException -from .indexers.indexer_api import indexerApi -from .indexers.indexer_exceptions import IndexerException, IndexerUnavailable -from .scene_exceptions import refresh_exceptions_cache -from .session.core import MedusaSession +from requests.exceptions import HTTPError logger = logging.getLogger(__name__) diff --git a/medusa/subtitles.py b/medusa/subtitles.py index eb37eded5b..2cda8d61e4 100644 --- a/medusa/subtitles.py +++ b/medusa/subtitles.py @@ -32,24 +32,21 @@ import knowit +from medusa import app, db, helpers, history +from medusa.cache import cache, memory_cache +from medusa.common import Quality, cpu_presets +from medusa.helper.common import dateTimeFormat, episode_num, remove_extension, subtitle_extensions +from medusa.helper.exceptions import ex +from medusa.helpers import is_media_file, is_rar_file +from medusa.show.show import Show from medusa.subtitle_providers.utils import hash_itasa from six import iteritems, string_types, text_type - from subliminal import ProviderPool, compute_score, provider_manager, refine, save_subtitles, scan_video from subliminal.core import search_external_subtitles from subliminal.score import episode_scores from subliminal.subtitle import get_subtitle_path -from . import app, db, helpers, history -from .cache import cache, memory_cache -from .common import Quality, cpu_presets -from .helper.common import dateTimeFormat, episode_num, remove_extension, subtitle_extensions -from .helper.exceptions import ex -from .helpers import is_media_file, is_rar_file -from .show.show import Show - - logger = logging.getLogger(__name__) PROVIDER_POOL_EXPIRATION_TIME = datetime.timedelta(minutes=15).total_seconds() @@ -821,8 +818,8 @@ def __init__(self): @staticmethod def subtitles_download_in_pp(): # pylint: disable=too-many-locals, too-many-branches, too-many-statements """Check for needed subtitles in the post process folder.""" - from . 
import process_tv - from .tv import Episode + from medusa import process_tv + from medusa.tv import Episode logger.info(u'Checking for needed subtitles in Post-Process folder') @@ -898,7 +895,7 @@ def unpack_rar_files(dirpath): :param dirpath: the directory path to be used :type dirpath: str """ - from . import process_tv + from medusa import process_tv for root, _, files in os.walk(dirpath, topdown=False): # Skip folders that are being used for unpacking if u'_UNPACK' in root.upper(): diff --git a/medusa/torrent_checker.py b/medusa/torrent_checker.py index 3261d64fda..4375c2f5d5 100644 --- a/medusa/torrent_checker.py +++ b/medusa/torrent_checker.py @@ -19,7 +19,7 @@ import logging -import app +from medusa import app from medusa.clients import torrent logger = logging.getLogger(__name__) diff --git a/medusa/ui.py b/medusa/ui.py index 2a50debb3a..2c744ffa50 100644 --- a/medusa/ui.py +++ b/medusa/ui.py @@ -19,8 +19,8 @@ import datetime import json -from . import app -from .ws.MedusaWebSocketHandler import push_to_web_socket +from medusa import app +from medusa.ws.MedusaWebSocketHandler import push_to_web_socket MESSAGE = 'notice' ERROR = 'error' diff --git a/tests/legacy/db_tests.py b/tests/legacy/db_tests.py index d0cd090a43..9ddf7258e0 100644 --- a/tests/legacy/db_tests.py +++ b/tests/legacy/db_tests.py @@ -22,7 +22,7 @@ import threading -from . import test_lib as test +from tests.legacy import test_lib as test class DBBasicTests(test.AppTestDBCase): diff --git a/tests/legacy/media/show_banner_tests.py b/tests/legacy/media/show_banner_tests.py index 1cb98211b3..5da051483a 100644 --- a/tests/legacy/media/show_banner_tests.py +++ b/tests/legacy/media/show_banner_tests.py @@ -19,7 +19,7 @@ from __future__ import print_function from medusa.media.banner import ShowBanner -from .generic_media_tests import GenericMediaTests +from tests.legacy.media.generic_media_tests import GenericMediaTests class ShowBannerTests(GenericMediaTests): diff --git a/tests/legacy/media/show_fan_art_tests.py b/tests/legacy/media/show_fan_art_tests.py index 3d52ba3a29..4b6da1ca94 100644 --- a/tests/legacy/media/show_fan_art_tests.py +++ b/tests/legacy/media/show_fan_art_tests.py @@ -19,7 +19,7 @@ from __future__ import print_function from medusa.media.fan_art import ShowFanArt -from .generic_media_tests import GenericMediaTests +from tests.legacy.media.generic_media_tests import GenericMediaTests class ShowFanArtTests(GenericMediaTests): diff --git a/tests/legacy/media/show_network_logo_tests.py b/tests/legacy/media/show_network_logo_tests.py index 404a1f1899..ed529d32b1 100644 --- a/tests/legacy/media/show_network_logo_tests.py +++ b/tests/legacy/media/show_network_logo_tests.py @@ -21,7 +21,7 @@ import os from medusa.media.network_logo import ShowNetworkLogo -from .generic_media_tests import GenericMediaTests +from tests.legacy.media.generic_media_tests import GenericMediaTests class ShowNetworkLogoTests(GenericMediaTests): diff --git a/tests/legacy/media/show_poster_tests.py b/tests/legacy/media/show_poster_tests.py index 29eb9b798b..a917b418a9 100644 --- a/tests/legacy/media/show_poster_tests.py +++ b/tests/legacy/media/show_poster_tests.py @@ -19,7 +19,7 @@ from __future__ import print_function from medusa.media.poster import ShowPoster -from .generic_media_tests import GenericMediaTests +from tests.legacy.media.generic_media_tests import GenericMediaTests class ShowPosterTests(GenericMediaTests): diff --git a/tests/legacy/notifier_tests.py b/tests/legacy/notifier_tests.py index 3f28ec3dd9..aebd826a08 100644 --- 
a/tests/legacy/notifier_tests.py +++ b/tests/legacy/notifier_tests.py @@ -29,7 +29,7 @@ from medusa.notifiers.prowl import Notifier as ProwlNotifier from medusa.server.web import Home from medusa.tv import Episode, Series -from . import test_lib as test +from tests.legacy import test_lib as test class NotifierTests(test.AppTestDBCase): diff --git a/tests/legacy/pp_tests.py b/tests/legacy/pp_tests.py index 0a04dc55e3..9a3312b7df 100644 --- a/tests/legacy/pp_tests.py +++ b/tests/legacy/pp_tests.py @@ -25,7 +25,7 @@ from medusa.name_cache import addNameToCache from medusa.post_processor import PostProcessor from medusa.tv import Episode, Series -from . import test_lib as test +from tests.legacy import test_lib as test class PPInitTests(unittest.TestCase): diff --git a/tests/legacy/providers/nzb_provider_tests.py b/tests/legacy/providers/nzb_provider_tests.py index 69528b201e..139f982edb 100644 --- a/tests/legacy/providers/nzb_provider_tests.py +++ b/tests/legacy/providers/nzb_provider_tests.py @@ -23,8 +23,10 @@ from medusa import app from medusa.providers.generic_provider import GenericProvider from medusa.providers.nzb.nzb_provider import NZBProvider + from six import iteritems -from .generic_provider_tests import GenericProviderTests + +from tests.legacy.providers.generic_provider_tests import GenericProviderTests class NZBProviderTests(GenericProviderTests): diff --git a/tests/legacy/providers/torrent_provider_tests.py b/tests/legacy/providers/torrent_provider_tests.py index 00b5fd56a2..7b14136acf 100644 --- a/tests/legacy/providers/torrent_provider_tests.py +++ b/tests/legacy/providers/torrent_provider_tests.py @@ -24,7 +24,7 @@ from medusa.providers.generic_provider import GenericProvider from medusa.providers.torrent.torrent_provider import TorrentProvider from six import iteritems -from .generic_provider_tests import GenericProviderTests +from tests.legacy.providers.generic_provider_tests import GenericProviderTests class TorrentProviderTests(GenericProviderTests): diff --git a/tests/legacy/scene_helpers_tests.py b/tests/legacy/scene_helpers_tests.py index de0e2a623a..a5811a20b8 100644 --- a/tests/legacy/scene_helpers_tests.py +++ b/tests/legacy/scene_helpers_tests.py @@ -6,7 +6,7 @@ from medusa import common, db, name_cache, scene_exceptions from medusa.show import naming from medusa.tv import Series as Show -from . import test_lib as test +from tests.legacy import test_lib as test class SceneTests(test.AppTestDBCase): diff --git a/tests/legacy/snatch_tests.py b/tests/legacy/snatch_tests.py index c2492593cb..5721268004 100644 --- a/tests/legacy/snatch_tests.py +++ b/tests/legacy/snatch_tests.py @@ -25,7 +25,7 @@ from medusa import app, common, providers from medusa.search.core import search_providers from medusa.tv import Episode, Series -from . import test_lib as test +from tests.legacy import test_lib as test TESTS = { "Dexter": {"a": 1, "q": common.HD, "s": 5, "e": [7], "b": 'Dexter.S05E07.720p.BluRay.X264-REWARD', diff --git a/tests/legacy/tv_tests.py b/tests/legacy/tv_tests.py index 8c10873e25..b512e1f465 100644 --- a/tests/legacy/tv_tests.py +++ b/tests/legacy/tv_tests.py @@ -22,7 +22,7 @@ from medusa import app from medusa.tv import Episode, Series -from . 
import test_lib as test +from tests.legacy import test_lib as test class TVShowTests(test.AppTestDBCase): diff --git a/tests/legacy/xem_tests.py b/tests/legacy/xem_tests.py index 5e100cac3f..0c9c54695c 100644 --- a/tests/legacy/xem_tests.py +++ b/tests/legacy/xem_tests.py @@ -24,7 +24,7 @@ from medusa import app from medusa.tv import Series -from . import test_lib as test +from tests.legacy import test_lib as test class XEMBasicTests(test.AppTestDBCase): From 4f6f8e8362cd6e941ad70ae86c40df1a92e956bb Mon Sep 17 00:00:00 2001 From: Fernando Date: Sun, 31 Dec 2017 11:11:16 -0200 Subject: [PATCH 21/35] Remove broad exception for news. Remove duplicate log (#3543) * Fall back to local network_timezones * Remove broad exception for news. Remove duplicate log * Unused import * Don't fallback to local file * Delete network_timezones.txt * Revert log * Review and more broad exception * Log in the exceptions --- medusa/server/web/home/news.py | 10 ++++------ medusa/version_checker.py | 27 +++++++++++++-------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/medusa/server/web/home/news.py b/medusa/server/web/home/news.py index c979eb376a..f13aeb2244 100644 --- a/medusa/server/web/home/news.py +++ b/medusa/server/web/home/news.py @@ -4,7 +4,7 @@ import markdown2 -from medusa import app, logger +from medusa import app from medusa.server.web.core import PageTemplate from medusa.server.web.home.handler import Home @@ -17,11 +17,9 @@ def __init__(self, *args, **kwargs): super(HomeNews, self).__init__(*args, **kwargs) def index(self): - try: - news = app.version_check_scheduler.action.check_for_new_news(force=True) - except Exception: - logger.log('Could not load news from repo, giving a link!', logger.DEBUG) - news = 'Could not load news from the repo. [Click here for news.md]({url})'.format(url=app.NEWS_URL) + news = app.version_check_scheduler.action.check_for_new_news(force=True) + if not news: + news = 'Could not load news from the repository. [Click here for news.md]({url})'.format(url=app.NEWS_URL) app.NEWS_LAST_READ = app.NEWS_LATEST app.NEWS_UNREAD = 0 diff --git a/medusa/version_checker.py b/medusa/version_checker.py index 3066c6dc17..a65f641bf4 100644 --- a/medusa/version_checker.py +++ b/medusa/version_checker.py @@ -306,32 +306,31 @@ def check_for_new_news(self, force=False): :force: ignored """ # Grab a copy of the news - log.debug(u'check_for_new_news: Checking GitHub for latest news.') - try: - news = self.session.get(app.NEWS_URL).text - except Exception: - log.warning(u'check_for_new_news: Could not load news from repo.') - news = '' - - if not news: - return '' + log.debug(u'Checking GitHub for latest news.') + response = self.session.get(app.NEWS_URL) + if not response or not response.text: + log.debug(u'Could not load news from URL: %s', app.NEWS_URL) + return try: last_read = datetime.datetime.strptime(app.NEWS_LAST_READ, '%Y-%m-%d') - except Exception: + except ValueError: + log.warning(u'Invalid news last read date: %s', app.NEWS_LAST_READ) last_read = 0 + news = response.text app.NEWS_UNREAD = 0 - gotLatest = False + got_latest = False for match in re.finditer(r'^####\s*(\d{4}-\d{2}-\d{2})\s*####', news, re.M): - if not gotLatest: - gotLatest = True + if not got_latest: + got_latest = True app.NEWS_LATEST = match.group(1) try: if datetime.datetime.strptime(match.group(1), '%Y-%m-%d') > last_read: app.NEWS_UNREAD += 1 - except Exception: + except ValueError: + log.warning(u'Unable to match latest news date. 
Repository news date: %s', match.group(1)) pass return news From d42eefb7b0882dd435218182d411553d9977333b Mon Sep 17 00:00:00 2001 From: bobbysteel Date: Fri, 5 Jan 2018 14:07:04 +0000 Subject: [PATCH 22/35] Rev alpine linux to 3.7 after testing (#3561) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a2447e6936..90ed264e1d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM lsiobase/alpine.python:3.6 +FROM lsiobase/alpine.python:3.7 MAINTAINER bobbysteel # set version label From 3049b7e93912d1bff00e1fe3378a3daf689aee09 Mon Sep 17 00:00:00 2001 From: Labrys of Knossos Date: Sat, 6 Jan 2018 09:25:25 -0500 Subject: [PATCH 23/35] Update readme (#3510) Remove screenshots as the IMGUR album no longer exists Remove torrentproject and tpb from special thanks section since they are effectively dead --- readme.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/readme.md b/readme.md index 096330ca49..3177c61c7e 100644 --- a/readme.md +++ b/readme.md @@ -40,10 +40,6 @@ - DupeKey/DupeScore for NZBGet 12+ - Real SSL certificate validation -#### Screenshots -- [Desktop (Full-HD)](http://imgur.com/a/4fpBk) -- [Mobile](http://imgur.com/a/WPyG6) - #### Dependencies To run Medusa from source you will need Python 2.7.10 @@ -65,8 +61,6 @@ A full list can be found [here](https://github.com/pymedusa/Medusa/wiki/Medusa-S #### Special Thanks to: ![image](https://rarbg.com/favicon.ico)[RARBG](https://rarbg.to) -![image](https://torrentproject.se/favicon.ico)[TorrentProject](https://torrentproject.se/about) -![image](https://thepiratebay.se/favicon.ico)[ThePirateBay](https://thepiratebay.se/) ![image](https://nzb.cat/favicon.ico)[NZB.cat](https://nzb.cat/) ![image](https://nzbgeek.info/favicon.ico)[NZBGeek](https://nzbgeek.info) ![image](https://raw.githubusercontent.com/pymedusa/Medusa/master/static/images/providers/dognzb.png)[DOGnzb](https://dognzb.cr) From 5e69e4e27e6d5d303b68cc81ca27be9e8ff3f512 Mon Sep 17 00:00:00 2001 From: Fernando Date: Sat, 6 Jan 2018 16:55:56 -0200 Subject: [PATCH 24/35] Feature/log info (#3577) * IMDB log message as INFO * Change network timezones log message to INFO * This is a dev log. Not user info * This also spams warning because of CDN * Disable imdb call --- medusa/helpers/__init__.py | 8 ++++---- medusa/name_parser/rules/rules.py | 4 ++-- medusa/network_timezones.py | 2 +- medusa/show_queue.py | 6 +++--- medusa/tv/series.py | 2 ++ 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/medusa/helpers/__init__.py b/medusa/helpers/__init__.py index 63a86f0451..e0c8a3e9a4 100644 --- a/medusa/helpers/__init__.py +++ b/medusa/helpers/__init__.py @@ -1780,10 +1780,10 @@ def get_broken_providers(): response = MedusaSafeSession().get_json(url) if response is None: - log.warning('Unable to update the list with broken providers.' - ' This list is used to disable broken providers.' - ' You may encounter errors in the log files if you are' - ' using a broken provider.') + log.info('Unable to update the list with broken providers.' + ' This list is used to disable broken providers.' 
+ ' You may encounter errors in the log files if you are' + ' using a broken provider.') return [] log.info('Broken providers found: {0}', response) diff --git a/medusa/name_parser/rules/rules.py b/medusa/name_parser/rules/rules.py index f6763b1415..e76c92917d 100644 --- a/medusa/name_parser/rules/rules.py +++ b/medusa/name_parser/rules/rules.py @@ -1212,8 +1212,8 @@ def when(self, matches, context): to_remove.extend(matches.named('title', predicate=lambda match: match.value != values[0].value)) continue - log.info(u"Guessed more than one '%s' for '%s': %s", - name, matches.input_string, u','.join(unique_values), exc_info=False) + log.debug(u"Guessed more than one '%s' for '%s': %s", + name, matches.input_string, u','.join(unique_values), exc_info=False) to_remove.extend(values) return to_remove diff --git a/medusa/network_timezones.py b/medusa/network_timezones.py index 9bfb6831f0..2c0ae0e629 100644 --- a/medusa/network_timezones.py +++ b/medusa/network_timezones.py @@ -51,7 +51,7 @@ def update_network_dict(): url = '{base_url}/sb_network_timezones/network_timezones.txt'.format(base_url=BASE_PYMEDUSA_URL) response = session.get(url) if not response or not response.text: - logger.log(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url, logger.WARNING) + logger.log(u'Updating network timezones failed, this can happen from time to time. URL: %s' % url, logger.INFO) load_network_dict() return diff --git a/medusa/show_queue.py b/medusa/show_queue.py index a51a297a60..fa969fc3aa 100644 --- a/medusa/show_queue.py +++ b/medusa/show_queue.py @@ -532,7 +532,7 @@ def run(self): try: self.show.load_imdb_info() except IMDbHTTPError as e: - logger.log(u"Something wrong on IMDb api: " + e.message, logger.WARNING) + logger.log(u"Something wrong on IMDb api: " + e.message, logger.INFO) except Exception as e: logger.log(u"Error loading IMDb info: " + e.message, logger.ERROR) @@ -747,7 +747,7 @@ def run(self): self.show.load_imdb_info() except IMDbHTTPError as e: logger.log(u'{id}: Something wrong on IMDb api: {error_msg}'.format - (id=self.show.indexerid, error_msg=e.message), logger.WARNING) + (id=self.show.indexerid, error_msg=e.message), logger.INFO) except Exception as e: logger.log(u'{id}: Error loading IMDb info: {error_msg}'.format (id=self.show.indexerid, error_msg=e.message), logger.WARNING) @@ -868,7 +868,7 @@ def run(self): self.show.load_imdb_info() except IMDbHTTPError as e: logger.log(u'{id}: Something wrong on IMDb api: {error_msg}'.format - (id=self.show.indexerid, error_msg=e.message), logger.WARNING) + (id=self.show.indexerid, error_msg=e.message), logger.INFO) except Exception as e: logger.log(u'{id}: Error loading IMDb info: {error_msg}'.format (id=self.show.indexerid, error_msg=e.message), logger.WARNING) diff --git a/medusa/tv/series.py b/medusa/tv/series.py index 8f60956b17..af5c0d41aa 100644 --- a/medusa/tv/series.py +++ b/medusa/tv/series.py @@ -1536,6 +1536,8 @@ def load_from_indexer(self, tvapi=None): def load_imdb_info(self): """Load all required show information from IMDb with ImdbPie.""" + # TODO: Use new parser or wait upstream API fix + return imdb_api = imdbpie.Imdb() if not self.imdb_id: From a6a6b7e29d719fcfd7d776e6dc3b83aa7ced18a6 Mon Sep 17 00:00:00 2001 From: Dario Date: Sun, 7 Jan 2018 18:17:36 +0100 Subject: [PATCH 25/35] Don't encode paths with UTF-8 for Windows (#3581) --- medusa/subtitles.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/medusa/subtitles.py b/medusa/subtitles.py index 
2cda8d61e4..131031553e 100644 --- a/medusa/subtitles.py +++ b/medusa/subtitles.py @@ -42,6 +42,7 @@ from medusa.subtitle_providers.utils import hash_itasa from six import iteritems, string_types, text_type + from subliminal import ProviderPool, compute_score, provider_manager, refine, save_subtitles, scan_video from subliminal.core import search_external_subtitles from subliminal.score import episode_scores @@ -593,7 +594,7 @@ def get_current_subtitles(tv_episode): return get_subtitles(video) -def _encode(value, encoding='utf-8', fallback=None): +def _encode(value, fallback=None): """Encode the value using the specified encoding. It fallbacks to the specified encoding or SYS_ENCODING if not defined @@ -607,6 +608,8 @@ def _encode(value, encoding='utf-8', fallback=None): :return: the encoded value :rtype: str """ + encoding = 'utf-8' if os.name != 'nt' else app.SYS_ENCODING + try: return value.encode(encoding) except UnicodeEncodeError: @@ -615,7 +618,7 @@ def _encode(value, encoding='utf-8', fallback=None): return value.encode(fallback or app.SYS_ENCODING) -def _decode(value, encoding='utf-8', fallback=None): +def _decode(value, fallback=None): """Decode the value using the specified encoding. It fallbacks to the specified encoding or SYS_ENCODING if not defined @@ -629,12 +632,14 @@ def _decode(value, encoding='utf-8', fallback=None): :return: the decoded value :rtype: unicode """ + encoding = 'utf-8' if os.name != 'nt' else app.SYS_ENCODING + try: - return value.decode(encoding) + return text_type(value, encoding) except UnicodeDecodeError: logger.debug(u'Failed to decode to %s, falling back to %s: %r', encoding, fallback or app.SYS_ENCODING, value) - return value.decode(fallback or app.SYS_ENCODING) + return text_type(value, fallback or app.SYS_ENCODING) def get_subtitle_description(subtitle): @@ -1000,7 +1005,7 @@ def dhm(td): ep_num = episode_num(ep_to_sub['season'], ep_to_sub['episode']) or \ episode_num(ep_to_sub['season'], ep_to_sub['episode'], numbering='absolute') - subtitle_path = _encode(ep_to_sub['location'], encoding=app.SYS_ENCODING, fallback='utf-8') + subtitle_path = _encode(ep_to_sub['location'], fallback='utf-8') if not os.path.isfile(subtitle_path): logger.debug(u'Episode file does not exist, cannot download subtitles for %s %s', ep_to_sub['show_name'], ep_num) From 9c39360a6f782dfc618a6db9b21bb876acafb05a Mon Sep 17 00:00:00 2001 From: h3llrais3r Date: Sun, 7 Jan 2018 20:42:21 +0100 Subject: [PATCH 26/35] Show anime checkbox when adding existing shows (#3568) * Show anime checkbox when adding existing shows * Fix adding existing show with prompt for settings * Fix javascript equal check --- medusa/server/web/home/add_shows.py | 4 ++-- static/js/add-shows/add-existing-show.js | 3 +++ static/js/add-shows/new-show.js | 2 +- views/addShows_addExistingShow.mako | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/medusa/server/web/home/add_shows.py b/medusa/server/web/home/add_shows.py index 0595eb4f77..f031e08ce5 100644 --- a/medusa/server/web/home/add_shows.py +++ b/medusa/server/web/home/add_shows.py @@ -362,8 +362,8 @@ def existingShows(self): Prints out the page to add existing shows from a root dir """ t = PageTemplate(rh=self, filename='addShows_addExistingShow.mako') - return t.render(enable_anime_options=False, title='Existing Show', - header='Existing Show', topmenu='home', + return t.render(enable_anime_options=True, blacklist=[], whitelist=[], groups=[], + title='Existing Show', header='Existing Show', topmenu='home', controller='addShows', 
action='addExistingShow') def addShowByID(self, indexer_id, show_name=None, indexer="TVDB", which_series=None, diff --git a/static/js/add-shows/add-existing-show.js b/static/js/add-shows/add-existing-show.js index 09b7c95757..1000161623 100644 --- a/static/js/add-shows/add-existing-show.js +++ b/static/js/add-shows/add-existing-show.js @@ -1,4 +1,7 @@ MEDUSA.addShows.addExistingShow = function() { + // Hide the black/whitelist, because it can only be used for a single anime show + $.updateBlackWhiteList(undefined); + $('#tableDiv').on('click', '#checkAll', function() { var seasonCheck = this; $('.dirCheck').each(function() { diff --git a/static/js/add-shows/new-show.js b/static/js/add-shows/new-show.js index e3907ce4cd..7a973f21d7 100644 --- a/static/js/add-shows/new-show.js +++ b/static/js/add-shows/new-show.js @@ -150,7 +150,7 @@ MEDUSA.addShows.newShow = function() { $('#addShowButton').click(function() { // if they haven't picked a show don't let them submit - if (!$('input:radio[name="whichSeries"]:checked').val() && $('input:hidden[name="whichSeries"]').val().length !== 0) { + if (!$('input:radio[name="whichSeries"]:checked').val() && $('input:hidden[name="whichSeries"]').val().length === 0) { alert('You must choose a show to continue'); // eslint-disable-line no-alert return false; } diff --git a/views/addShows_addExistingShow.mako b/views/addShows_addExistingShow.mako index bca57aabcf..0440d6b263 100644 --- a/views/addShows_addExistingShow.mako +++ b/views/addShows_addExistingShow.mako @@ -17,7 +17,7 @@ ## @TODO: Fix this stupid hack
 [hunk body lost in conversion to text: one markup line in this template was removed and one added, and only the "Manage Directories" tab label survives from the surrounding context]
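A note on the handler change in this patch: once existingShows() renders with enable_anime_options=True, the template can reach the black/white-list names, which is why the render call also passes blacklist=[], whitelist=[], and groups=[]. The sketch below is illustrative only — the template text and variable names are hypothetical, not taken from Medusa — and shows why such empty defaults matter when rendering Mako templates:

    # Illustrative sketch: why a Mako render call should pass empty defaults
    # for names the template can reach. The template text and variable names
    # here are hypothetical; they only mirror the pattern used by the handler.
    from mako.template import Template

    sketch = Template("""
    % if enable_anime_options:
    %     for group in blacklist:
    ${group}
    %     endfor
    % endif
    """)

    # With blacklist=[] the loop simply renders nothing. If blacklist were
    # omitted, the name would resolve to Mako's UNDEFINED sentinel, and
    # iterating over it would raise a TypeError at render time.
    print(sketch.render(enable_anime_options=True, blacklist=[]))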
From 8bb6e2278ae36fa4111f4215d9fcab55c5ed7b2a Mon Sep 17 00:00:00 2001
From: p0ps
Date: Tue, 9 Jan 2018 10:46:23 +0100
Subject: [PATCH 27/35] Fix typo in the image_update summary. (#3588)

---
 medusa/server/web/manage/handler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/medusa/server/web/manage/handler.py b/medusa/server/web/manage/handler.py
index df7c2f39fa..8d73ad1614 100644
--- a/medusa/server/web/manage/handler.py
+++ b/medusa/server/web/manage/handler.py
@@ -744,7 +744,7 @@ def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None
         if subtitles:
             message += '\nSubtitles: {0}'.format(len(subtitles))
         if image_update:
-            message += '\nImage update: {0}'.format(len(subtitles))
+            message += '\nImage updates: {0}'.format(len(image_update))
 
         if message:
             ui.notifications.message('Queued actions:', message)

From be79a12ed54519cda644f0a87e20ce757fcfb45f Mon Sep 17 00:00:00 2001
From: Stefaan Ghysels
Date: Tue, 9 Jan 2018 11:55:42 +0100
Subject: [PATCH 28/35] Only apply UTF-8 monkey patch when necessary (#3573)

* Apply the UTF-8 locale patch only when necessary
* Cleanup args
* Only apply input/output patches when the handler is callable
* zzz
* Flake8 fix
* removed trailing spaces
---
 medusa/init/filesystem.py | 36 +++++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/medusa/init/filesystem.py b/medusa/init/filesystem.py
index 43efd86f97..fbaf8fd52a 100644
--- a/medusa/init/filesystem.py
+++ b/medusa/init/filesystem.py
@@ -80,9 +80,32 @@ def _varkwargs(**kwargs):
     return {k: _handle_input(arg) for k, arg in kwargs.items()}
 
 
-def make_closure(f, handle_arg, handle_output):
-    """Create a closure that encodes parameters to utf-8 and call original function."""
-    return lambda *args, **kwargs: handle_output(f(*[handle_arg(arg) for arg in args], **{k: handle_arg(arg) for k, arg in kwargs.items()}))
+def make_closure(f, handle_arg=None, handle_output=None):
+    """Apply an input handler and output handler to a function.
+
+    Used to ensure UTF-8 encoding at input and output.
+    """
+    return patch_output(patch_input(f, handle_arg), handle_output)
+
+
+def patch_input(f, handle_arg=None):
+    """Patch all args and kwargs of function f.
+
+    If handle_arg is None, just return the original function.
+    """
+    def patched_input(*args, **kwargs):
+        return f(*[handle_arg(arg) for arg in args], **{k: handle_arg(arg) for k, arg in kwargs.items()})
+    return patched_input if callable(handle_arg) else f
+
+
+def patch_output(f, handle_output=None):
+    """Patch the output of function f with the handle_output function.
+
+    If handle_output is None, just return the original function.
+ """ + def patched_output(*args, **kwargs): + return handle_output(f(*args, **kwargs)) + return patched_output if callable(handle_output) else f def initialize(): @@ -113,9 +136,12 @@ def initialize(): if os.name != 'nt': affected_functions[os].extend(['chmod', 'chown', 'link', 'statvfs', 'symlink']) - handle_arg = _handle_input if not fs_encoding or fs_encoding.lower() != 'utf-8' else lambda x: x + if not fs_encoding or fs_encoding.lower() != 'utf-8': + handle_input = _handle_input + else: + handle_input = None for k, v in affected_functions.items(): handle_output = handle_output_map.get(k, _handle_output_u) for f in v: - setattr(k, f, make_closure(getattr(k, f), handle_arg, handle_output)) + setattr(k, f, make_closure(getattr(k, f), handle_input, handle_output)) From c794cd4316422cc74a05d70831312938e3266c4b Mon Sep 17 00:00:00 2001 From: Dario Date: Tue, 9 Jan 2018 21:02:00 +0100 Subject: [PATCH 29/35] Don't encode input for Windows (#3589) --- medusa/init/filesystem.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/medusa/init/filesystem.py b/medusa/init/filesystem.py index fbaf8fd52a..654e57e071 100644 --- a/medusa/init/filesystem.py +++ b/medusa/init/filesystem.py @@ -9,6 +9,7 @@ import tarfile import certifi + from six import binary_type, text_type @@ -136,7 +137,7 @@ def initialize(): if os.name != 'nt': affected_functions[os].extend(['chmod', 'chown', 'link', 'statvfs', 'symlink']) - if not fs_encoding or fs_encoding.lower() != 'utf-8': + if not fs_encoding or fs_encoding.lower() not in ('utf-8', 'mbcs'): handle_input = _handle_input else: handle_input = None From e3c0476f430830b46d8fad4b69a35d7030677d2d Mon Sep 17 00:00:00 2001 From: Fernando Date: Thu, 11 Jan 2018 13:41:57 -0200 Subject: [PATCH 30/35] Fix move torrent key error (#3598) --- medusa/process_tv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/medusa/process_tv.py b/medusa/process_tv.py index 1fc09b0416..43fcf5fc86 100644 --- a/medusa/process_tv.py +++ b/medusa/process_tv.py @@ -171,7 +171,7 @@ def process(self, resource_name=None, force=False, is_priority=None, delete_on=F to_remove_hashes = app.RECENTLY_POSTPROCESSED.items() for info_hash, release_names in to_remove_hashes: if self.move_torrent(info_hash, release_names): - app.RECENTLY_POSTPROCESSED.pop(info_hash) + app.RECENTLY_POSTPROCESSED.pop(info_hash, None) return self.output From 649d43cf6fbc52a5e9230dde170e88ed1ad4f73a Mon Sep 17 00:00:00 2001 From: p0ps Date: Fri, 12 Jan 2018 12:39:09 +0100 Subject: [PATCH 31/35] Update imdbpie to release 5.2.0 (#3595) * Update imdbpie to release 5.0.0 * Update requirements.txt * Update imdb-pie to v5.2.0 * Set imdb-pie version to 5.2.0 in requirements * Add safe_get() to utils * Update HTTPError to ImdbAPIError * Fix IMDb * Rename imdb_obj to imdb_info --- ext/boto/__init__.py | 1216 +++++ ext/boto/auth.py | 1099 ++++ ext/boto/auth_handler.py | 60 + ext/boto/awslambda/__init__.py | 40 + ext/boto/awslambda/exceptions.py | 38 + ext/boto/awslambda/layer1.py | 517 ++ ext/boto/beanstalk/__init__.py | 44 + ext/boto/beanstalk/exception.py | 63 + ext/boto/beanstalk/layer1.py | 1201 +++++ ext/boto/beanstalk/response.py | 704 +++ ext/boto/beanstalk/wrapper.py | 29 + ext/boto/cacerts/__init__.py | 22 + ext/boto/cacerts/cacerts.txt | 3837 ++++++++++++++ ext/boto/cloudformation/__init__.py | 55 + ext/boto/cloudformation/connection.py | 922 ++++ ext/boto/cloudformation/stack.py | 423 ++ ext/boto/cloudformation/template.py | 51 + ext/boto/cloudfront/__init__.py | 326 ++ 
ext/boto/cloudfront/distribution.py | 757 +++ ext/boto/cloudfront/exception.py | 26 + ext/boto/cloudfront/identity.py | 121 + ext/boto/cloudfront/invalidation.py | 216 + ext/boto/cloudfront/logging.py | 38 + ext/boto/cloudfront/object.py | 48 + ext/boto/cloudfront/origin.py | 150 + ext/boto/cloudfront/signers.py | 59 + ext/boto/cloudhsm/__init__.py | 41 + ext/boto/cloudhsm/exceptions.py | 35 + ext/boto/cloudhsm/layer1.py | 448 ++ ext/boto/cloudsearch/__init__.py | 42 + ext/boto/cloudsearch/document.py | 271 + ext/boto/cloudsearch/domain.py | 394 ++ ext/boto/cloudsearch/layer1.py | 747 +++ ext/boto/cloudsearch/layer2.py | 75 + ext/boto/cloudsearch/optionstatus.py | 248 + ext/boto/cloudsearch/search.py | 377 ++ ext/boto/cloudsearch/sourceattribute.py | 74 + ext/boto/cloudsearch2/__init__.py | 39 + ext/boto/cloudsearch2/document.py | 315 ++ ext/boto/cloudsearch2/domain.py | 542 ++ ext/boto/cloudsearch2/exceptions.py | 46 + ext/boto/cloudsearch2/layer1.py | 783 +++ ext/boto/cloudsearch2/layer2.py | 94 + ext/boto/cloudsearch2/optionstatus.py | 233 + ext/boto/cloudsearch2/search.py | 452 ++ ext/boto/cloudsearchdomain/__init__.py | 41 + ext/boto/cloudsearchdomain/exceptions.py | 30 + ext/boto/cloudsearchdomain/layer1.py | 540 ++ ext/boto/cloudtrail/__init__.py | 41 + ext/boto/cloudtrail/exceptions.py | 118 + ext/boto/cloudtrail/layer1.py | 374 ++ ext/boto/codedeploy/__init__.py | 40 + ext/boto/codedeploy/exceptions.py | 199 + ext/boto/codedeploy/layer1.py | 899 ++++ ext/boto/cognito/__init__.py | 21 + ext/boto/cognito/identity/__init__.py | 42 + ext/boto/cognito/identity/exceptions.py | 44 + ext/boto/cognito/identity/layer1.py | 549 ++ ext/boto/cognito/sync/__init__.py | 41 + ext/boto/cognito/sync/exceptions.py | 54 + ext/boto/cognito/sync/layer1.py | 494 ++ ext/boto/compat.py | 102 + ext/boto/configservice/__init__.py | 41 + ext/boto/configservice/exceptions.py | 103 + ext/boto/configservice/layer1.py | 385 ++ ext/boto/connection.py | 1227 +++++ ext/boto/datapipeline/__init__.py | 41 + ext/boto/datapipeline/exceptions.py | 42 + ext/boto/datapipeline/layer1.py | 639 +++ ext/boto/directconnect/__init__.py | 41 + ext/boto/directconnect/exceptions.py | 29 + ext/boto/directconnect/layer1.py | 627 +++ ext/boto/dynamodb/__init__.py | 41 + ext/boto/dynamodb/batch.py | 261 + ext/boto/dynamodb/condition.py | 170 + ext/boto/dynamodb/exceptions.py | 64 + ext/boto/dynamodb/item.py | 202 + ext/boto/dynamodb/layer1.py | 577 +++ ext/boto/dynamodb/layer2.py | 806 +++ ext/boto/dynamodb/schema.py | 112 + ext/boto/dynamodb/table.py | 546 ++ ext/boto/dynamodb/types.py | 410 ++ ext/boto/dynamodb2/__init__.py | 42 + ext/boto/dynamodb2/exceptions.py | 78 + ext/boto/dynamodb2/fields.py | 337 ++ ext/boto/dynamodb2/items.py | 473 ++ ext/boto/dynamodb2/layer1.py | 2904 +++++++++++ ext/boto/dynamodb2/results.py | 204 + ext/boto/dynamodb2/table.py | 1723 +++++++ ext/boto/dynamodb2/types.py | 44 + ext/boto/ec2/__init__.py | 84 + ext/boto/ec2/address.py | 130 + ext/boto/ec2/attributes.py | 71 + ext/boto/ec2/autoscale/__init__.py | 894 ++++ ext/boto/ec2/autoscale/activity.py | 73 + ext/boto/ec2/autoscale/group.py | 361 ++ ext/boto/ec2/autoscale/instance.py | 59 + ext/boto/ec2/autoscale/launchconfig.py | 270 + ext/boto/ec2/autoscale/limits.py | 44 + ext/boto/ec2/autoscale/policy.py | 181 + ext/boto/ec2/autoscale/request.py | 38 + ext/boto/ec2/autoscale/scheduled.py | 77 + ext/boto/ec2/autoscale/tag.py | 84 + ext/boto/ec2/blockdevicemapping.py | 165 + ext/boto/ec2/bundleinstance.py | 78 + ext/boto/ec2/buyreservation.py | 85 + 
ext/boto/ec2/cloudwatch/__init__.py | 593 +++ ext/boto/ec2/cloudwatch/alarm.py | 323 ++ ext/boto/ec2/cloudwatch/datapoint.py | 40 + ext/boto/ec2/cloudwatch/dimension.py | 38 + ext/boto/ec2/cloudwatch/listelement.py | 30 + ext/boto/ec2/cloudwatch/metric.py | 169 + ext/boto/ec2/connection.py | 4527 +++++++++++++++++ ext/boto/ec2/ec2object.py | 144 + ext/boto/ec2/elb/__init__.py | 757 +++ ext/boto/ec2/elb/attributes.py | 154 + ext/boto/ec2/elb/healthcheck.py | 89 + ext/boto/ec2/elb/instancestate.py | 63 + ext/boto/ec2/elb/listelement.py | 36 + ext/boto/ec2/elb/listener.py | 87 + ext/boto/ec2/elb/loadbalancer.py | 419 ++ ext/boto/ec2/elb/policies.py | 108 + ext/boto/ec2/elb/securitygroup.py | 38 + ext/boto/ec2/group.py | 38 + ext/boto/ec2/image.py | 445 ++ ext/boto/ec2/instance.py | 677 +++ ext/boto/ec2/instanceinfo.py | 49 + ext/boto/ec2/instancestatus.py | 212 + ext/boto/ec2/instancetype.py | 59 + ext/boto/ec2/keypair.py | 111 + ext/boto/ec2/launchspecification.py | 105 + ext/boto/ec2/networkinterface.py | 351 ++ ext/boto/ec2/placementgroup.py | 53 + ext/boto/ec2/regioninfo.py | 36 + ext/boto/ec2/reservedinstance.py | 352 ++ ext/boto/ec2/securitygroup.py | 392 ++ ext/boto/ec2/snapshot.py | 202 + ext/boto/ec2/spotdatafeedsubscription.py | 65 + ext/boto/ec2/spotinstancerequest.py | 192 + ext/boto/ec2/spotpricehistory.py | 54 + ext/boto/ec2/tag.py | 84 + ext/boto/ec2/volume.py | 315 ++ ext/boto/ec2/volumestatus.py | 205 + ext/boto/ec2/zone.py | 78 + ext/boto/ec2containerservice/__init__.py | 42 + ext/boto/ec2containerservice/exceptions.py | 31 + ext/boto/ec2containerservice/layer1.py | 748 +++ ext/boto/ecs/__init__.py | 105 + ext/boto/ecs/item.py | 164 + ext/boto/elasticache/__init__.py | 41 + ext/boto/elasticache/layer1.py | 1664 ++++++ ext/boto/elastictranscoder/__init__.py | 45 + ext/boto/elastictranscoder/exceptions.py | 50 + ext/boto/elastictranscoder/layer1.py | 932 ++++ ext/boto/emr/__init__.py | 48 + ext/boto/emr/bootstrap_action.py | 46 + ext/boto/emr/connection.py | 765 +++ ext/boto/emr/emrobject.py | 511 ++ ext/boto/emr/instance_group.py | 43 + ext/boto/emr/step.py | 283 ++ ext/boto/endpoints.json | 1296 +++++ ext/boto/endpoints.py | 239 + ext/boto/exception.py | 585 +++ ext/boto/file/README | 49 + ext/boto/file/__init__.py | 28 + ext/boto/file/bucket.py | 112 + ext/boto/file/connection.py | 33 + ext/boto/file/key.py | 201 + ext/boto/file/simpleresultset.py | 30 + ext/boto/fps/__init__.py | 21 + ext/boto/fps/connection.py | 395 ++ ext/boto/fps/exception.py | 344 ++ ext/boto/fps/response.py | 207 + ext/boto/glacier/__init__.py | 41 + ext/boto/glacier/concurrent.py | 425 ++ ext/boto/glacier/exceptions.py | 58 + ext/boto/glacier/job.py | 177 + ext/boto/glacier/layer1.py | 1279 +++++ ext/boto/glacier/layer2.py | 101 + ext/boto/glacier/response.py | 49 + ext/boto/glacier/utils.py | 175 + ext/boto/glacier/vault.py | 450 ++ ext/boto/glacier/writer.py | 262 + ext/boto/gs/__init__.py | 22 + ext/boto/gs/acl.py | 308 ++ ext/boto/gs/bucket.py | 1001 ++++ ext/boto/gs/bucketlistresultset.py | 64 + ext/boto/gs/connection.py | 129 + ext/boto/gs/cors.py | 169 + ext/boto/gs/key.py | 948 ++++ ext/boto/gs/lifecycle.py | 224 + ext/boto/gs/resumable_upload_handler.py | 679 +++ ext/boto/gs/user.py | 54 + ext/boto/handler.py | 60 + ext/boto/https_connection.py | 138 + ext/boto/iam/__init__.py | 93 + ext/boto/iam/connection.py | 1932 +++++++ ext/boto/iam/summarymap.py | 42 + ext/boto/jsonresponse.py | 168 + ext/boto/kinesis/__init__.py | 41 + ext/boto/kinesis/exceptions.py | 51 + ext/boto/kinesis/layer1.py | 
879 ++++ ext/boto/kms/__init__.py | 41 + ext/boto/kms/exceptions.py | 72 + ext/boto/kms/layer1.py | 821 +++ ext/boto/logs/__init__.py | 41 + ext/boto/logs/exceptions.py | 59 + ext/boto/logs/layer1.py | 576 +++ ext/boto/machinelearning/__init__.py | 42 + ext/boto/machinelearning/exceptions.py | 51 + ext/boto/machinelearning/layer1.py | 1408 +++++ ext/boto/manage/__init__.py | 23 + ext/boto/manage/cmdshell.py | 407 ++ ext/boto/manage/propget.py | 63 + ext/boto/manage/server.py | 556 ++ ext/boto/manage/task.py | 176 + ext/boto/manage/test_manage.py | 34 + ext/boto/manage/volume.py | 420 ++ ext/boto/mashups/__init__.py | 23 + ext/boto/mashups/interactive.py | 97 + ext/boto/mashups/iobject.py | 114 + ext/boto/mashups/order.py | 211 + ext/boto/mashups/server.py | 395 ++ ext/boto/mturk/__init__.py | 23 + ext/boto/mturk/connection.py | 1052 ++++ ext/boto/mturk/layoutparam.py | 55 + ext/boto/mturk/notification.py | 103 + ext/boto/mturk/price.py | 48 + ext/boto/mturk/qualification.py | 157 + ext/boto/mturk/question.py | 455 ++ ext/boto/mws/__init__.py | 21 + ext/boto/mws/connection.py | 1168 +++++ ext/boto/mws/exception.py | 70 + ext/boto/mws/response.py | 787 +++ ext/boto/opsworks/__init__.py | 41 + ext/boto/opsworks/exceptions.py | 30 + ext/boto/opsworks/layer1.py | 3094 +++++++++++ ext/boto/plugin.py | 93 + ext/boto/provider.py | 484 ++ ext/boto/pyami/__init__.py | 22 + ext/boto/pyami/bootstrap.py | 134 + ext/boto/pyami/config.py | 235 + ext/boto/pyami/copybot.cfg | 60 + ext/boto/pyami/copybot.py | 96 + ext/boto/pyami/helloworld.py | 27 + ext/boto/pyami/installers/__init__.py | 63 + ext/boto/pyami/installers/ubuntu/__init__.py | 22 + ext/boto/pyami/installers/ubuntu/apache.py | 43 + ext/boto/pyami/installers/ubuntu/ebs.py | 238 + ext/boto/pyami/installers/ubuntu/installer.py | 94 + ext/boto/pyami/installers/ubuntu/mysql.py | 108 + ext/boto/pyami/installers/ubuntu/trac.py | 139 + ext/boto/pyami/launch_ami.py | 177 + ext/boto/pyami/scriptbase.py | 43 + ext/boto/pyami/startup.py | 60 + ext/boto/rds/__init__.py | 1622 ++++++ ext/boto/rds/dbinstance.py | 416 ++ ext/boto/rds/dbsecuritygroup.py | 186 + ext/boto/rds/dbsnapshot.py | 138 + ext/boto/rds/dbsubnetgroup.py | 69 + ext/boto/rds/event.py | 49 + ext/boto/rds/logfile.py | 68 + ext/boto/rds/optiongroup.py | 404 ++ ext/boto/rds/parametergroup.py | 201 + ext/boto/rds/regioninfo.py | 33 + ext/boto/rds/statusinfo.py | 54 + ext/boto/rds/vpcsecuritygroupmembership.py | 85 + ext/boto/rds2/__init__.py | 53 + ext/boto/rds2/exceptions.py | 234 + ext/boto/rds2/layer1.py | 3783 ++++++++++++++ ext/boto/redshift/__init__.py | 41 + ext/boto/redshift/exceptions.py | 459 ++ ext/boto/redshift/layer1.py | 3097 +++++++++++ ext/boto/regioninfo.py | 290 ++ ext/boto/requestlog.py | 39 + ext/boto/resultset.py | 176 + ext/boto/roboto/__init__.py | 1 + ext/boto/roboto/awsqueryrequest.py | 503 ++ ext/boto/roboto/awsqueryservice.py | 122 + ext/boto/roboto/param.py | 147 + ext/boto/route53/__init__.py | 94 + ext/boto/route53/connection.py | 613 +++ ext/boto/route53/domains/__init__.py | 40 + ext/boto/route53/domains/exceptions.py | 46 + ext/boto/route53/domains/layer1.py | 868 ++++ ext/boto/route53/exception.py | 27 + ext/boto/route53/healthcheck.py | 146 + ext/boto/route53/hostedzone.py | 51 + ext/boto/route53/record.py | 374 ++ ext/boto/route53/status.py | 42 + ext/boto/route53/zone.py | 419 ++ ext/boto/s3/__init__.py | 75 + ext/boto/s3/acl.py | 171 + ext/boto/s3/bucket.py | 1878 +++++++ ext/boto/s3/bucketlistresultset.py | 158 + ext/boto/s3/bucketlogging.py | 83 + 
ext/boto/s3/connection.py | 672 +++ ext/boto/s3/cors.py | 210 + ext/boto/s3/deletemarker.py | 55 + ext/boto/s3/key.py | 1933 +++++++ ext/boto/s3/keyfile.py | 134 + ext/boto/s3/lifecycle.py | 311 ++ ext/boto/s3/multidelete.py | 138 + ext/boto/s3/multipart.py | 330 ++ ext/boto/s3/prefix.py | 42 + ext/boto/s3/resumable_download_handler.py | 352 ++ ext/boto/s3/tagging.py | 71 + ext/boto/s3/user.py | 49 + ext/boto/s3/website.py | 293 ++ ext/boto/sdb/__init__.py | 53 + ext/boto/sdb/connection.py | 618 +++ ext/boto/sdb/db/__init__.py | 20 + ext/boto/sdb/db/blob.py | 76 + ext/boto/sdb/db/key.py | 59 + ext/boto/sdb/db/manager/__init__.py | 85 + ext/boto/sdb/db/manager/sdbmanager.py | 738 +++ ext/boto/sdb/db/manager/xmlmanager.py | 517 ++ ext/boto/sdb/db/model.py | 296 ++ ext/boto/sdb/db/property.py | 704 +++ ext/boto/sdb/db/query.py | 86 + ext/boto/sdb/db/sequence.py | 224 + ext/boto/sdb/db/test_db.py | 231 + ext/boto/sdb/domain.py | 380 ++ ext/boto/sdb/item.py | 177 + ext/boto/sdb/queryresultset.py | 93 + ext/boto/sdb/regioninfo.py | 33 + ext/boto/services/__init__.py | 23 + ext/boto/services/bs.py | 180 + ext/boto/services/message.py | 58 + ext/boto/services/result.py | 135 + ext/boto/services/service.py | 161 + ext/boto/services/servicedef.py | 91 + ext/boto/services/sonofmmm.cfg | 43 + ext/boto/services/sonofmmm.py | 81 + ext/boto/services/submit.py | 87 + ext/boto/ses/__init__.py | 51 + ext/boto/ses/connection.py | 565 ++ ext/boto/ses/exceptions.py | 80 + ext/boto/sns/__init__.py | 53 + ext/boto/sns/connection.py | 765 +++ ext/boto/sqs/__init__.py | 46 + ext/boto/sqs/attributes.py | 46 + ext/boto/sqs/batchresults.py | 95 + ext/boto/sqs/bigmessage.py | 119 + ext/boto/sqs/connection.py | 596 +++ ext/boto/sqs/jsonmessage.py | 43 + ext/boto/sqs/message.py | 271 + ext/boto/sqs/messageattributes.py | 66 + ext/boto/sqs/queue.py | 541 ++ ext/boto/sqs/regioninfo.py | 33 + ext/boto/storage_uri.py | 905 ++++ ext/boto/sts/__init__.py | 51 + ext/boto/sts/connection.py | 652 +++ ext/boto/sts/credentials.py | 237 + ext/boto/support/__init__.py | 41 + ext/boto/support/exceptions.py | 58 + ext/boto/support/layer1.py | 674 +++ ext/boto/swf/__init__.py | 45 + ext/boto/swf/exceptions.py | 44 + ext/boto/swf/layer1.py | 1513 ++++++ ext/boto/swf/layer1_decisions.py | 287 ++ ext/boto/swf/layer2.py | 347 ++ ext/boto/utils.py | 1091 ++++ ext/boto/vendored/__init__.py | 0 ext/boto/vendored/regions/__init__.py | 21 + ext/boto/vendored/regions/exceptions.py | 27 + ext/boto/vendored/regions/regions.py | 188 + ext/boto/vendored/six.py | 868 ++++ ext/boto/vpc/__init__.py | 1827 +++++++ ext/boto/vpc/customergateway.py | 54 + ext/boto/vpc/dhcpoptions.py | 72 + ext/boto/vpc/internetgateway.py | 72 + ext/boto/vpc/networkacl.py | 164 + ext/boto/vpc/routetable.py | 118 + ext/boto/vpc/subnet.py | 57 + ext/boto/vpc/vpc.py | 204 + ext/boto/vpc/vpc_peering_connection.py | 163 + ext/boto/vpc/vpnconnection.py | 204 + ext/boto/vpc/vpngateway.py | 87 + ext/certifi/__init__.py | 2 +- ext/certifi/cacert.pem | 793 +-- ext/certifi/core.py | 3 +- ext/certifi/weak.pem | 793 +-- ext/diskcache/__init__.py | 34 + ext/diskcache/cli.py | 1 + ext/diskcache/core.py | 1804 +++++++ ext/diskcache/djangocache.py | 321 ++ ext/diskcache/fanout.py | 571 +++ ext/diskcache/memo.py | 105 + ext/diskcache/persistent.py | 1313 +++++ ext/diskcache/stampede.py | 75 + ext/funcsigs/__init__.py | 829 +++ ext/funcsigs/version.py | 1 + ext/imdbpie/__init__.py | 2 + ext/imdbpie/auth.py | 139 + ext/imdbpie/constants.py | 70 +- ext/imdbpie/exceptions.py | 4 +- 
ext/imdbpie/imdbpie.py | 405 +- ext/imdbpie/objects.py | 217 - ext/js2py/__init__.py | 4 +- ext/js2py/host/jsfunctions.py | 25 +- ext/js2py/node_import.py | 88 + ext/js2py/py_node_modules/__init__.py | 1 + ext/js2py/pyjs.py | 11 +- ext/mock/__init__.py | 7 + ext/mock/mock.py | 2553 ++++++++++ ext/mock/tests/__init__.py | 3 + ext/mock/tests/__main__.py | 18 + ext/mock/tests/support.py | 36 + ext/mock/tests/testcallable.py | 158 + ext/mock/tests/testhelpers.py | 975 ++++ ext/mock/tests/testmagicmethods.py | 533 ++ ext/mock/tests/testmock.py | 1593 ++++++ ext/mock/tests/testpatch.py | 1883 +++++++ ext/mock/tests/testsentinel.py | 33 + ext/mock/tests/testwith.py | 306 ++ ext/msgpack/_version.py | 2 +- ext/msgpack/exceptions.py | 22 +- ext/msgpack/fallback.py | 623 ++- ext/pkg_resources/__init__.py | 112 +- ext/pytz/__init__.py | 43 +- ext/pytz/zoneinfo/Africa/Juba | Bin 683 -> 683 bytes ext/pytz/zoneinfo/Africa/Khartoum | Bin 683 -> 713 bytes ext/pytz/zoneinfo/Africa/Windhoek | Bin 1592 -> 1030 bytes ext/pytz/zoneinfo/America/Adak | Bin 2365 -> 2365 bytes ext/pytz/zoneinfo/America/Anchorage | Bin 2380 -> 2380 bytes ext/pytz/zoneinfo/America/Atka | Bin 2365 -> 2365 bytes ext/pytz/zoneinfo/America/Detroit | Bin 2216 -> 2188 bytes ext/pytz/zoneinfo/America/Grand_Turk | Bin 1287 -> 1881 bytes ext/pytz/zoneinfo/America/Juneau | Bin 2362 -> 2362 bytes ext/pytz/zoneinfo/America/Metlakatla | Bin 1418 -> 1418 bytes ext/pytz/zoneinfo/America/Nome | Bin 2376 -> 2376 bytes ext/pytz/zoneinfo/America/Sitka | Bin 2350 -> 2350 bytes ext/pytz/zoneinfo/America/Yakutat | Bin 2314 -> 2314 bytes ext/pytz/zoneinfo/Asia/Calcutta | Bin 293 -> 312 bytes ext/pytz/zoneinfo/Asia/Famagusta | Bin 1445 -> 2042 bytes ext/pytz/zoneinfo/Asia/Kolkata | Bin 293 -> 312 bytes ext/pytz/zoneinfo/Asia/Rangoon | Bin 297 -> 297 bytes ext/pytz/zoneinfo/Asia/Yangon | Bin 297 -> 297 bytes ext/pytz/zoneinfo/Asia/Yerevan | Bin 1199 -> 1213 bytes ext/pytz/zoneinfo/Canada/East-Saskatchewan | Bin 994 -> 0 bytes ext/pytz/zoneinfo/Eire | Bin 3559 -> 3543 bytes ext/pytz/zoneinfo/Europe/Dublin | Bin 3559 -> 3543 bytes ext/pytz/zoneinfo/Pacific/Apia | Bin 1134 -> 1134 bytes ext/pytz/zoneinfo/Pacific/Fiji | Bin 1102 -> 1104 bytes ext/pytz/zoneinfo/Pacific/Midway | Bin 196 -> 196 bytes ext/pytz/zoneinfo/Pacific/Pago_Pago | Bin 196 -> 196 bytes ext/pytz/zoneinfo/Pacific/Samoa | Bin 196 -> 196 bytes ext/pytz/zoneinfo/Pacific/Tongatapu | Bin 1003 -> 393 bytes ext/pytz/zoneinfo/US/Alaska | Bin 2380 -> 2380 bytes ext/pytz/zoneinfo/US/Aleutian | Bin 2365 -> 2365 bytes ext/pytz/zoneinfo/US/Michigan | Bin 2216 -> 2188 bytes ext/pytz/zoneinfo/US/Samoa | Bin 196 -> 196 bytes ext/pytz/zoneinfo/leapseconds | 61 + ext/pytz/zoneinfo/tzdata.zi | 4146 +++++++++++++++ ext/pytz/zoneinfo/zone.tab | 4 +- ext/pytz/zoneinfo/zone1970.tab | 7 +- ext/setuptools/__init__.py | 22 +- ext/setuptools/archive_util.py | 4 +- ext/setuptools/build_meta.py | 172 + ext/setuptools/command/__init__.py | 1 + ext/setuptools/command/bdist_egg.py | 18 +- ext/setuptools/command/develop.py | 4 +- ext/setuptools/command/dist_info.py | 36 + ext/setuptools/command/easy_install.py | 35 +- ext/setuptools/command/egg_info.py | 11 +- ext/setuptools/command/sdist.py | 7 - ext/setuptools/command/test.py | 18 +- ext/setuptools/config.py | 4 +- ext/setuptools/dist.py | 64 +- ext/setuptools/glibc.py | 86 + ext/setuptools/monkey.py | 2 +- ext/setuptools/package_index.py | 60 +- ext/setuptools/pep425tags.py | 316 ++ ext/setuptools/py26compat.py | 31 - ext/setuptools/py31compat.py | 15 - 
ext/setuptools/sandbox.py | 4 - ext/setuptools/ssl_support.py | 11 +- ext/setuptools/wheel.py | 163 + ext/tzlocal/__init__.py | 2 - ext/tzlocal/darwin.py | 59 - ext/tzlocal/test_data/timezone/etc/timezone | 2 +- .../test_data/vardbzoneinfo/var/db/zoneinfo | 2 +- ext/tzlocal/tests.py | 68 +- ext/tzlocal/unix.py | 36 +- ext/tzlocal/win32.py | 2 + ext/tzlocal/windows_tz.py | 112 +- ext/wrapt/__init__.py | 19 + ext/wrapt/arguments.py | 96 + ext/wrapt/decorators.py | 512 ++ ext/wrapt/importer.py | 228 + ext/wrapt/wrappers.py | 901 ++++ medusa/helpers/utils.py | 17 + medusa/show_queue.py | 8 +- medusa/tv/series.py | 36 +- requirements.txt | 2 +- 496 files changed, 143907 insertions(+), 2092 deletions(-) create mode 100644 ext/boto/__init__.py create mode 100644 ext/boto/auth.py create mode 100644 ext/boto/auth_handler.py create mode 100644 ext/boto/awslambda/__init__.py create mode 100644 ext/boto/awslambda/exceptions.py create mode 100644 ext/boto/awslambda/layer1.py create mode 100644 ext/boto/beanstalk/__init__.py create mode 100644 ext/boto/beanstalk/exception.py create mode 100644 ext/boto/beanstalk/layer1.py create mode 100644 ext/boto/beanstalk/response.py create mode 100644 ext/boto/beanstalk/wrapper.py create mode 100644 ext/boto/cacerts/__init__.py create mode 100644 ext/boto/cacerts/cacerts.txt create mode 100644 ext/boto/cloudformation/__init__.py create mode 100644 ext/boto/cloudformation/connection.py create mode 100644 ext/boto/cloudformation/stack.py create mode 100644 ext/boto/cloudformation/template.py create mode 100644 ext/boto/cloudfront/__init__.py create mode 100644 ext/boto/cloudfront/distribution.py create mode 100644 ext/boto/cloudfront/exception.py create mode 100644 ext/boto/cloudfront/identity.py create mode 100644 ext/boto/cloudfront/invalidation.py create mode 100644 ext/boto/cloudfront/logging.py create mode 100644 ext/boto/cloudfront/object.py create mode 100644 ext/boto/cloudfront/origin.py create mode 100644 ext/boto/cloudfront/signers.py create mode 100644 ext/boto/cloudhsm/__init__.py create mode 100644 ext/boto/cloudhsm/exceptions.py create mode 100644 ext/boto/cloudhsm/layer1.py create mode 100644 ext/boto/cloudsearch/__init__.py create mode 100644 ext/boto/cloudsearch/document.py create mode 100644 ext/boto/cloudsearch/domain.py create mode 100644 ext/boto/cloudsearch/layer1.py create mode 100644 ext/boto/cloudsearch/layer2.py create mode 100644 ext/boto/cloudsearch/optionstatus.py create mode 100644 ext/boto/cloudsearch/search.py create mode 100644 ext/boto/cloudsearch/sourceattribute.py create mode 100644 ext/boto/cloudsearch2/__init__.py create mode 100644 ext/boto/cloudsearch2/document.py create mode 100644 ext/boto/cloudsearch2/domain.py create mode 100644 ext/boto/cloudsearch2/exceptions.py create mode 100644 ext/boto/cloudsearch2/layer1.py create mode 100644 ext/boto/cloudsearch2/layer2.py create mode 100644 ext/boto/cloudsearch2/optionstatus.py create mode 100644 ext/boto/cloudsearch2/search.py create mode 100644 ext/boto/cloudsearchdomain/__init__.py create mode 100644 ext/boto/cloudsearchdomain/exceptions.py create mode 100644 ext/boto/cloudsearchdomain/layer1.py create mode 100644 ext/boto/cloudtrail/__init__.py create mode 100644 ext/boto/cloudtrail/exceptions.py create mode 100644 ext/boto/cloudtrail/layer1.py create mode 100644 ext/boto/codedeploy/__init__.py create mode 100644 ext/boto/codedeploy/exceptions.py create mode 100644 ext/boto/codedeploy/layer1.py create mode 100644 ext/boto/cognito/__init__.py create mode 100644 
ext/boto/cognito/identity/__init__.py create mode 100644 ext/boto/cognito/identity/exceptions.py create mode 100644 ext/boto/cognito/identity/layer1.py create mode 100644 ext/boto/cognito/sync/__init__.py create mode 100644 ext/boto/cognito/sync/exceptions.py create mode 100644 ext/boto/cognito/sync/layer1.py create mode 100644 ext/boto/compat.py create mode 100644 ext/boto/configservice/__init__.py create mode 100644 ext/boto/configservice/exceptions.py create mode 100644 ext/boto/configservice/layer1.py create mode 100644 ext/boto/connection.py create mode 100644 ext/boto/datapipeline/__init__.py create mode 100644 ext/boto/datapipeline/exceptions.py create mode 100644 ext/boto/datapipeline/layer1.py create mode 100644 ext/boto/directconnect/__init__.py create mode 100644 ext/boto/directconnect/exceptions.py create mode 100644 ext/boto/directconnect/layer1.py create mode 100644 ext/boto/dynamodb/__init__.py create mode 100644 ext/boto/dynamodb/batch.py create mode 100644 ext/boto/dynamodb/condition.py create mode 100644 ext/boto/dynamodb/exceptions.py create mode 100644 ext/boto/dynamodb/item.py create mode 100644 ext/boto/dynamodb/layer1.py create mode 100644 ext/boto/dynamodb/layer2.py create mode 100644 ext/boto/dynamodb/schema.py create mode 100644 ext/boto/dynamodb/table.py create mode 100644 ext/boto/dynamodb/types.py create mode 100644 ext/boto/dynamodb2/__init__.py create mode 100644 ext/boto/dynamodb2/exceptions.py create mode 100644 ext/boto/dynamodb2/fields.py create mode 100644 ext/boto/dynamodb2/items.py create mode 100644 ext/boto/dynamodb2/layer1.py create mode 100644 ext/boto/dynamodb2/results.py create mode 100644 ext/boto/dynamodb2/table.py create mode 100644 ext/boto/dynamodb2/types.py create mode 100644 ext/boto/ec2/__init__.py create mode 100644 ext/boto/ec2/address.py create mode 100644 ext/boto/ec2/attributes.py create mode 100644 ext/boto/ec2/autoscale/__init__.py create mode 100644 ext/boto/ec2/autoscale/activity.py create mode 100644 ext/boto/ec2/autoscale/group.py create mode 100644 ext/boto/ec2/autoscale/instance.py create mode 100644 ext/boto/ec2/autoscale/launchconfig.py create mode 100644 ext/boto/ec2/autoscale/limits.py create mode 100644 ext/boto/ec2/autoscale/policy.py create mode 100644 ext/boto/ec2/autoscale/request.py create mode 100644 ext/boto/ec2/autoscale/scheduled.py create mode 100644 ext/boto/ec2/autoscale/tag.py create mode 100644 ext/boto/ec2/blockdevicemapping.py create mode 100644 ext/boto/ec2/bundleinstance.py create mode 100644 ext/boto/ec2/buyreservation.py create mode 100644 ext/boto/ec2/cloudwatch/__init__.py create mode 100644 ext/boto/ec2/cloudwatch/alarm.py create mode 100644 ext/boto/ec2/cloudwatch/datapoint.py create mode 100644 ext/boto/ec2/cloudwatch/dimension.py create mode 100644 ext/boto/ec2/cloudwatch/listelement.py create mode 100644 ext/boto/ec2/cloudwatch/metric.py create mode 100644 ext/boto/ec2/connection.py create mode 100644 ext/boto/ec2/ec2object.py create mode 100644 ext/boto/ec2/elb/__init__.py create mode 100644 ext/boto/ec2/elb/attributes.py create mode 100644 ext/boto/ec2/elb/healthcheck.py create mode 100644 ext/boto/ec2/elb/instancestate.py create mode 100644 ext/boto/ec2/elb/listelement.py create mode 100644 ext/boto/ec2/elb/listener.py create mode 100644 ext/boto/ec2/elb/loadbalancer.py create mode 100644 ext/boto/ec2/elb/policies.py create mode 100644 ext/boto/ec2/elb/securitygroup.py create mode 100644 ext/boto/ec2/group.py create mode 100644 ext/boto/ec2/image.py create mode 100644 
ext/boto/ec2/instance.py create mode 100644 ext/boto/ec2/instanceinfo.py create mode 100644 ext/boto/ec2/instancestatus.py create mode 100644 ext/boto/ec2/instancetype.py create mode 100644 ext/boto/ec2/keypair.py create mode 100644 ext/boto/ec2/launchspecification.py create mode 100644 ext/boto/ec2/networkinterface.py create mode 100644 ext/boto/ec2/placementgroup.py create mode 100644 ext/boto/ec2/regioninfo.py create mode 100644 ext/boto/ec2/reservedinstance.py create mode 100644 ext/boto/ec2/securitygroup.py create mode 100644 ext/boto/ec2/snapshot.py create mode 100644 ext/boto/ec2/spotdatafeedsubscription.py create mode 100644 ext/boto/ec2/spotinstancerequest.py create mode 100644 ext/boto/ec2/spotpricehistory.py create mode 100644 ext/boto/ec2/tag.py create mode 100644 ext/boto/ec2/volume.py create mode 100644 ext/boto/ec2/volumestatus.py create mode 100644 ext/boto/ec2/zone.py create mode 100644 ext/boto/ec2containerservice/__init__.py create mode 100644 ext/boto/ec2containerservice/exceptions.py create mode 100644 ext/boto/ec2containerservice/layer1.py create mode 100644 ext/boto/ecs/__init__.py create mode 100644 ext/boto/ecs/item.py create mode 100644 ext/boto/elasticache/__init__.py create mode 100644 ext/boto/elasticache/layer1.py create mode 100644 ext/boto/elastictranscoder/__init__.py create mode 100644 ext/boto/elastictranscoder/exceptions.py create mode 100644 ext/boto/elastictranscoder/layer1.py create mode 100644 ext/boto/emr/__init__.py create mode 100644 ext/boto/emr/bootstrap_action.py create mode 100644 ext/boto/emr/connection.py create mode 100644 ext/boto/emr/emrobject.py create mode 100644 ext/boto/emr/instance_group.py create mode 100644 ext/boto/emr/step.py create mode 100644 ext/boto/endpoints.json create mode 100644 ext/boto/endpoints.py create mode 100644 ext/boto/exception.py create mode 100644 ext/boto/file/README create mode 100644 ext/boto/file/__init__.py create mode 100644 ext/boto/file/bucket.py create mode 100644 ext/boto/file/connection.py create mode 100644 ext/boto/file/key.py create mode 100644 ext/boto/file/simpleresultset.py create mode 100644 ext/boto/fps/__init__.py create mode 100644 ext/boto/fps/connection.py create mode 100644 ext/boto/fps/exception.py create mode 100644 ext/boto/fps/response.py create mode 100644 ext/boto/glacier/__init__.py create mode 100644 ext/boto/glacier/concurrent.py create mode 100644 ext/boto/glacier/exceptions.py create mode 100644 ext/boto/glacier/job.py create mode 100644 ext/boto/glacier/layer1.py create mode 100644 ext/boto/glacier/layer2.py create mode 100644 ext/boto/glacier/response.py create mode 100644 ext/boto/glacier/utils.py create mode 100644 ext/boto/glacier/vault.py create mode 100644 ext/boto/glacier/writer.py create mode 100644 ext/boto/gs/__init__.py create mode 100644 ext/boto/gs/acl.py create mode 100644 ext/boto/gs/bucket.py create mode 100644 ext/boto/gs/bucketlistresultset.py create mode 100644 ext/boto/gs/connection.py create mode 100644 ext/boto/gs/cors.py create mode 100644 ext/boto/gs/key.py create mode 100644 ext/boto/gs/lifecycle.py create mode 100644 ext/boto/gs/resumable_upload_handler.py create mode 100644 ext/boto/gs/user.py create mode 100644 ext/boto/handler.py create mode 100644 ext/boto/https_connection.py create mode 100644 ext/boto/iam/__init__.py create mode 100644 ext/boto/iam/connection.py create mode 100644 ext/boto/iam/summarymap.py create mode 100644 ext/boto/jsonresponse.py create mode 100644 ext/boto/kinesis/__init__.py create mode 100644 
ext/boto/kinesis/exceptions.py create mode 100644 ext/boto/kinesis/layer1.py create mode 100644 ext/boto/kms/__init__.py create mode 100644 ext/boto/kms/exceptions.py create mode 100644 ext/boto/kms/layer1.py create mode 100644 ext/boto/logs/__init__.py create mode 100644 ext/boto/logs/exceptions.py create mode 100644 ext/boto/logs/layer1.py create mode 100644 ext/boto/machinelearning/__init__.py create mode 100644 ext/boto/machinelearning/exceptions.py create mode 100644 ext/boto/machinelearning/layer1.py create mode 100644 ext/boto/manage/__init__.py create mode 100644 ext/boto/manage/cmdshell.py create mode 100644 ext/boto/manage/propget.py create mode 100644 ext/boto/manage/server.py create mode 100644 ext/boto/manage/task.py create mode 100644 ext/boto/manage/test_manage.py create mode 100644 ext/boto/manage/volume.py create mode 100644 ext/boto/mashups/__init__.py create mode 100644 ext/boto/mashups/interactive.py create mode 100644 ext/boto/mashups/iobject.py create mode 100644 ext/boto/mashups/order.py create mode 100644 ext/boto/mashups/server.py create mode 100644 ext/boto/mturk/__init__.py create mode 100644 ext/boto/mturk/connection.py create mode 100644 ext/boto/mturk/layoutparam.py create mode 100644 ext/boto/mturk/notification.py create mode 100644 ext/boto/mturk/price.py create mode 100644 ext/boto/mturk/qualification.py create mode 100644 ext/boto/mturk/question.py create mode 100644 ext/boto/mws/__init__.py create mode 100644 ext/boto/mws/connection.py create mode 100644 ext/boto/mws/exception.py create mode 100644 ext/boto/mws/response.py create mode 100644 ext/boto/opsworks/__init__.py create mode 100644 ext/boto/opsworks/exceptions.py create mode 100644 ext/boto/opsworks/layer1.py create mode 100644 ext/boto/plugin.py create mode 100644 ext/boto/provider.py create mode 100644 ext/boto/pyami/__init__.py create mode 100644 ext/boto/pyami/bootstrap.py create mode 100644 ext/boto/pyami/config.py create mode 100644 ext/boto/pyami/copybot.cfg create mode 100644 ext/boto/pyami/copybot.py create mode 100644 ext/boto/pyami/helloworld.py create mode 100644 ext/boto/pyami/installers/__init__.py create mode 100644 ext/boto/pyami/installers/ubuntu/__init__.py create mode 100644 ext/boto/pyami/installers/ubuntu/apache.py create mode 100644 ext/boto/pyami/installers/ubuntu/ebs.py create mode 100644 ext/boto/pyami/installers/ubuntu/installer.py create mode 100644 ext/boto/pyami/installers/ubuntu/mysql.py create mode 100644 ext/boto/pyami/installers/ubuntu/trac.py create mode 100644 ext/boto/pyami/launch_ami.py create mode 100644 ext/boto/pyami/scriptbase.py create mode 100644 ext/boto/pyami/startup.py create mode 100644 ext/boto/rds/__init__.py create mode 100644 ext/boto/rds/dbinstance.py create mode 100644 ext/boto/rds/dbsecuritygroup.py create mode 100644 ext/boto/rds/dbsnapshot.py create mode 100644 ext/boto/rds/dbsubnetgroup.py create mode 100644 ext/boto/rds/event.py create mode 100644 ext/boto/rds/logfile.py create mode 100644 ext/boto/rds/optiongroup.py create mode 100644 ext/boto/rds/parametergroup.py create mode 100644 ext/boto/rds/regioninfo.py create mode 100644 ext/boto/rds/statusinfo.py create mode 100644 ext/boto/rds/vpcsecuritygroupmembership.py create mode 100644 ext/boto/rds2/__init__.py create mode 100644 ext/boto/rds2/exceptions.py create mode 100644 ext/boto/rds2/layer1.py create mode 100644 ext/boto/redshift/__init__.py create mode 100644 ext/boto/redshift/exceptions.py create mode 100644 ext/boto/redshift/layer1.py create mode 100644 ext/boto/regioninfo.py 
create mode 100644 ext/boto/requestlog.py create mode 100644 ext/boto/resultset.py create mode 100644 ext/boto/roboto/__init__.py create mode 100644 ext/boto/roboto/awsqueryrequest.py create mode 100644 ext/boto/roboto/awsqueryservice.py create mode 100644 ext/boto/roboto/param.py create mode 100644 ext/boto/route53/__init__.py create mode 100644 ext/boto/route53/connection.py create mode 100644 ext/boto/route53/domains/__init__.py create mode 100644 ext/boto/route53/domains/exceptions.py create mode 100644 ext/boto/route53/domains/layer1.py create mode 100644 ext/boto/route53/exception.py create mode 100644 ext/boto/route53/healthcheck.py create mode 100644 ext/boto/route53/hostedzone.py create mode 100644 ext/boto/route53/record.py create mode 100644 ext/boto/route53/status.py create mode 100644 ext/boto/route53/zone.py create mode 100644 ext/boto/s3/__init__.py create mode 100644 ext/boto/s3/acl.py create mode 100644 ext/boto/s3/bucket.py create mode 100644 ext/boto/s3/bucketlistresultset.py create mode 100644 ext/boto/s3/bucketlogging.py create mode 100644 ext/boto/s3/connection.py create mode 100644 ext/boto/s3/cors.py create mode 100644 ext/boto/s3/deletemarker.py create mode 100644 ext/boto/s3/key.py create mode 100644 ext/boto/s3/keyfile.py create mode 100644 ext/boto/s3/lifecycle.py create mode 100644 ext/boto/s3/multidelete.py create mode 100644 ext/boto/s3/multipart.py create mode 100644 ext/boto/s3/prefix.py create mode 100644 ext/boto/s3/resumable_download_handler.py create mode 100644 ext/boto/s3/tagging.py create mode 100644 ext/boto/s3/user.py create mode 100644 ext/boto/s3/website.py create mode 100644 ext/boto/sdb/__init__.py create mode 100644 ext/boto/sdb/connection.py create mode 100644 ext/boto/sdb/db/__init__.py create mode 100644 ext/boto/sdb/db/blob.py create mode 100644 ext/boto/sdb/db/key.py create mode 100644 ext/boto/sdb/db/manager/__init__.py create mode 100644 ext/boto/sdb/db/manager/sdbmanager.py create mode 100644 ext/boto/sdb/db/manager/xmlmanager.py create mode 100644 ext/boto/sdb/db/model.py create mode 100644 ext/boto/sdb/db/property.py create mode 100644 ext/boto/sdb/db/query.py create mode 100644 ext/boto/sdb/db/sequence.py create mode 100644 ext/boto/sdb/db/test_db.py create mode 100644 ext/boto/sdb/domain.py create mode 100644 ext/boto/sdb/item.py create mode 100644 ext/boto/sdb/queryresultset.py create mode 100644 ext/boto/sdb/regioninfo.py create mode 100644 ext/boto/services/__init__.py create mode 100644 ext/boto/services/bs.py create mode 100644 ext/boto/services/message.py create mode 100644 ext/boto/services/result.py create mode 100644 ext/boto/services/service.py create mode 100644 ext/boto/services/servicedef.py create mode 100644 ext/boto/services/sonofmmm.cfg create mode 100644 ext/boto/services/sonofmmm.py create mode 100644 ext/boto/services/submit.py create mode 100644 ext/boto/ses/__init__.py create mode 100644 ext/boto/ses/connection.py create mode 100644 ext/boto/ses/exceptions.py create mode 100644 ext/boto/sns/__init__.py create mode 100644 ext/boto/sns/connection.py create mode 100644 ext/boto/sqs/__init__.py create mode 100644 ext/boto/sqs/attributes.py create mode 100644 ext/boto/sqs/batchresults.py create mode 100644 ext/boto/sqs/bigmessage.py create mode 100644 ext/boto/sqs/connection.py create mode 100644 ext/boto/sqs/jsonmessage.py create mode 100644 ext/boto/sqs/message.py create mode 100644 ext/boto/sqs/messageattributes.py create mode 100644 ext/boto/sqs/queue.py create mode 100644 ext/boto/sqs/regioninfo.py create mode 
100644 ext/boto/storage_uri.py create mode 100644 ext/boto/sts/__init__.py create mode 100644 ext/boto/sts/connection.py create mode 100644 ext/boto/sts/credentials.py create mode 100644 ext/boto/support/__init__.py create mode 100644 ext/boto/support/exceptions.py create mode 100644 ext/boto/support/layer1.py create mode 100644 ext/boto/swf/__init__.py create mode 100644 ext/boto/swf/exceptions.py create mode 100644 ext/boto/swf/layer1.py create mode 100644 ext/boto/swf/layer1_decisions.py create mode 100644 ext/boto/swf/layer2.py create mode 100644 ext/boto/utils.py create mode 100644 ext/boto/vendored/__init__.py create mode 100644 ext/boto/vendored/regions/__init__.py create mode 100644 ext/boto/vendored/regions/exceptions.py create mode 100644 ext/boto/vendored/regions/regions.py create mode 100644 ext/boto/vendored/six.py create mode 100644 ext/boto/vpc/__init__.py create mode 100644 ext/boto/vpc/customergateway.py create mode 100644 ext/boto/vpc/dhcpoptions.py create mode 100644 ext/boto/vpc/internetgateway.py create mode 100644 ext/boto/vpc/networkacl.py create mode 100644 ext/boto/vpc/routetable.py create mode 100644 ext/boto/vpc/subnet.py create mode 100644 ext/boto/vpc/vpc.py create mode 100644 ext/boto/vpc/vpc_peering_connection.py create mode 100644 ext/boto/vpc/vpnconnection.py create mode 100644 ext/boto/vpc/vpngateway.py create mode 100644 ext/diskcache/__init__.py create mode 100644 ext/diskcache/cli.py create mode 100644 ext/diskcache/core.py create mode 100644 ext/diskcache/djangocache.py create mode 100644 ext/diskcache/fanout.py create mode 100644 ext/diskcache/memo.py create mode 100644 ext/diskcache/persistent.py create mode 100644 ext/diskcache/stampede.py create mode 100644 ext/funcsigs/__init__.py create mode 100644 ext/funcsigs/version.py create mode 100644 ext/imdbpie/auth.py delete mode 100644 ext/imdbpie/objects.py create mode 100644 ext/js2py/node_import.py create mode 100644 ext/js2py/py_node_modules/__init__.py create mode 100644 ext/mock/__init__.py create mode 100644 ext/mock/mock.py create mode 100644 ext/mock/tests/__init__.py create mode 100644 ext/mock/tests/__main__.py create mode 100644 ext/mock/tests/support.py create mode 100644 ext/mock/tests/testcallable.py create mode 100644 ext/mock/tests/testhelpers.py create mode 100644 ext/mock/tests/testmagicmethods.py create mode 100644 ext/mock/tests/testmock.py create mode 100644 ext/mock/tests/testpatch.py create mode 100644 ext/mock/tests/testsentinel.py create mode 100644 ext/mock/tests/testwith.py delete mode 100644 ext/pytz/zoneinfo/Canada/East-Saskatchewan create mode 100644 ext/pytz/zoneinfo/leapseconds create mode 100644 ext/pytz/zoneinfo/tzdata.zi create mode 100644 ext/setuptools/build_meta.py create mode 100644 ext/setuptools/command/dist_info.py create mode 100644 ext/setuptools/glibc.py create mode 100644 ext/setuptools/pep425tags.py delete mode 100644 ext/setuptools/py26compat.py create mode 100644 ext/setuptools/wheel.py delete mode 100644 ext/tzlocal/darwin.py create mode 100644 ext/wrapt/__init__.py create mode 100644 ext/wrapt/arguments.py create mode 100644 ext/wrapt/decorators.py create mode 100644 ext/wrapt/importer.py create mode 100644 ext/wrapt/wrappers.py diff --git a/ext/boto/__init__.py b/ext/boto/__init__.py new file mode 100644 index 0000000000..b9c65907f1 --- /dev/null +++ b/ext/boto/__init__.py @@ -0,0 +1,1216 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. 
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010, Google, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.config import Config, BotoConfigLocations +from boto.storage_uri import BucketStorageUri, FileStorageUri +import boto.plugin +import datetime +import os +import platform +import re +import sys +import logging +import logging.config + +from boto.compat import urlparse +from boto.exception import InvalidUriError + +__version__ = '2.48.0' +Version = __version__ # for backware compatibility + +# http://bugs.python.org/issue7980 +datetime.datetime.strptime('', '') + +UserAgent = 'Boto/%s Python/%s %s/%s' % ( + __version__, + platform.python_version(), + platform.system(), + platform.release() +) +config = Config() + +# Regex to disallow buckets violating charset or not [3..255] chars total. +BUCKET_NAME_RE = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\._-]{1,253}[a-zA-Z0-9]$') +# Regex to disallow buckets with individual DNS labels longer than 63. 
+TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}') +GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)' + r'#(?P<generation>[0-9]+)$') +VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$') +ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json') + + +def init_logging(): + for file in BotoConfigLocations: + try: + logging.config.fileConfig(os.path.expanduser(file)) + except: + pass + + +class NullHandler(logging.Handler): + def emit(self, record): + pass + +log = logging.getLogger('boto') +perflog = logging.getLogger('boto.perf') +log.addHandler(NullHandler()) +perflog.addHandler(NullHandler()) +init_logging() + +# convenience function to set logging to a particular file + + +def set_file_logger(name, filepath, level=logging.INFO, format_string=None): + global log + if not format_string: + format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" + logger = logging.getLogger(name) + logger.setLevel(level) + fh = logging.FileHandler(filepath) + fh.setLevel(level) + formatter = logging.Formatter(format_string) + fh.setFormatter(formatter) + logger.addHandler(fh) + log = logger + + +def set_stream_logger(name, level=logging.DEBUG, format_string=None): + global log + if not format_string: + format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" + logger = logging.getLogger(name) + logger.setLevel(level) + fh = logging.StreamHandler() + fh.setLevel(level) + formatter = logging.Formatter(format_string) + fh.setFormatter(formatter) + logger.addHandler(fh) + log = logger + + +def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sqs.connection.SQSConnection` + :return: A connection to Amazon's SQS + """ + from boto.sqs.connection import SQSConnection + return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to Amazon's S3 + """ + from boto.s3.connection import S3Connection + return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs): + """ + @type gs_access_key_id: string + @param gs_access_key_id: Your Google Cloud Storage Access Key ID + + @type gs_secret_access_key: string + @param gs_secret_access_key: Your Google Cloud Storage Secret Access Key + + @rtype: L{GSConnection} + @return: A connection to Google's Storage service + """ + from boto.gs.connection import GSConnection + return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs) + + +def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Amazon's EC2 + """ + from boto.ec2.connection import EC2Connection + return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_elb(aws_access_key_id=None,
aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.elb.ELBConnection` + :return: A connection to Amazon's Load Balancing Service + """ + from boto.ec2.elb import ELBConnection + return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection` + :return: A connection to Amazon's Auto Scaling Service + + :type use_block_device_types bool + :param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing + block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatability + with the old incorrect style. + """ + from boto.ec2.autoscale import AutoScaleConnection + return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` + :return: A connection to Amazon's EC2 Monitoring service + """ + from boto.ec2.cloudwatch import CloudWatchConnection + return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sdb.connection.SDBConnection` + :return: A connection to Amazon's SDB + """ + from boto.sdb.connection import SDBConnection + return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.fps.connection.FPSConnection` + :return: A connection to FPS + """ + from boto.fps.connection import FPSConnection + return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.mturk.connection.MTurkConnection` + :return: A connection to MTurk + """ + from boto.mturk.connection import MTurkConnection + return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type 
aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.fps.connection.FPSConnection` + :return: A connection to FPS + """ + from boto.cloudfront import CloudFrontConnection + return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.vpc.VPCConnection` + :return: A connection to VPC + """ + from boto.vpc import VPCConnection + return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.rds.RDSConnection` + :return: A connection to RDS + """ + from boto.rds import RDSConnection + return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.rds2.layer1.RDSConnection` + :return: A connection to RDS + """ + from boto.rds2.layer1 import RDSConnection + return RDSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.emr.EmrConnection` + :return: A connection to Elastic mapreduce + """ + from boto.emr import EmrConnection + return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sns.SNSConnection` + :return: A connection to Amazon's SNS + """ + from boto.sns import SNSConnection + return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.iam.IAMConnection` + :return: A connection to Amazon's IAM + """ + from boto.iam import IAMConnection + return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.dns.Route53Connection` + 
:return: A connection to Amazon's Route53 DNS Service + """ + from boto.route53 import Route53Connection + return Route53Connection(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudformation.CloudFormationConnection` + :return: A connection to Amazon's CloudFormation Service + """ + from boto.cloudformation import CloudFormationConnection + return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None, + port=8773, path='/services/Eucalyptus', is_secure=False, + **kwargs): + """ + Connect to a Eucalyptus service. + + :type host: string + :param host: the host name or ip address of the Eucalyptus server + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Eucalyptus server + """ + from boto.ec2 import EC2Connection + from boto.ec2.regioninfo import RegionInfo + + # Check for values in boto config, if not supplied as args + if not aws_access_key_id: + aws_access_key_id = config.get('Credentials', + 'euca_access_key_id', + None) + if not aws_secret_access_key: + aws_secret_access_key = config.get('Credentials', + 'euca_secret_access_key', + None) + if not host: + host = config.get('Boto', 'eucalyptus_host', None) + + reg = RegionInfo(name='eucalyptus', endpoint=host) + return EC2Connection(aws_access_key_id, aws_secret_access_key, + region=reg, port=port, path=path, + is_secure=is_secure, **kwargs) + + +def connect_glacier(aws_access_key_id=None, aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.glacier.layer2.Layer2` + :return: A connection to Amazon's Glacier Service + """ + from boto.glacier.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_ec2_endpoint(url, aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to an EC2 Api endpoint. Additional arguments are passed + through to connect_ec2. 
+ + :type url: string + :param url: A url for the ec2 api endpoint to connect to + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Eucalyptus server + """ + from boto.ec2.regioninfo import RegionInfo + + purl = urlparse(url) + kwargs['port'] = purl.port + kwargs['host'] = purl.hostname + kwargs['path'] = purl.path + if not 'is_secure' in kwargs: + kwargs['is_secure'] = (purl.scheme == "https") + + kwargs['region'] = RegionInfo(name=purl.hostname, + endpoint=purl.hostname) + kwargs['aws_access_key_id'] = aws_access_key_id + kwargs['aws_secret_access_key'] = aws_secret_access_key + + return(connect_ec2(**kwargs)) + + +def connect_walrus(host=None, aws_access_key_id=None, + aws_secret_access_key=None, + port=8773, path='/services/Walrus', is_secure=False, + **kwargs): + """ + Connect to a Walrus service. + + :type host: string + :param host: the host name or ip address of the Walrus server + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to Walrus + """ + from boto.s3.connection import S3Connection + from boto.s3.connection import OrdinaryCallingFormat + + # Check for values in boto config, if not supplied as args + if not aws_access_key_id: + aws_access_key_id = config.get('Credentials', + 'euca_access_key_id', + None) + if not aws_secret_access_key: + aws_secret_access_key = config.get('Credentials', + 'euca_secret_access_key', + None) + if not host: + host = config.get('Boto', 'walrus_host', None) + + return S3Connection(aws_access_key_id, aws_secret_access_key, + host=host, port=port, path=path, + calling_format=OrdinaryCallingFormat(), + is_secure=is_secure, **kwargs) + + +def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ses.SESConnection` + :return: A connection to Amazon's SES + """ + from boto.ses import SESConnection + return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sts.STSConnection` + :return: A connection to Amazon's STS + """ + from boto.sts import STSConnection + return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, + is_secure=False, **kwargs): + """ + Connect to the Internet Archive via their S3-like API. + + :type ia_access_key_id: string + :param ia_access_key_id: Your IA Access Key ID. This will also look + in your boto config file for an entry in the Credentials + section called "ia_access_key_id" + + :type ia_secret_access_key: string + :param ia_secret_access_key: Your IA Secret Access Key. 
This will also + look in your boto config file for an entry in the Credentials + section called "ia_secret_access_key" + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to the Internet Archive + """ + from boto.s3.connection import S3Connection + from boto.s3.connection import OrdinaryCallingFormat + + access_key = config.get('Credentials', 'ia_access_key_id', + ia_access_key_id) + secret_key = config.get('Credentials', 'ia_secret_access_key', + ia_secret_access_key) + + return S3Connection(access_key, secret_key, + host='s3.us.archive.org', + calling_format=OrdinaryCallingFormat(), + is_secure=is_secure, **kwargs) + + +def connect_dynamodb(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.dynamodb.layer2.Layer2` + :return: A connection to the Layer2 interface for DynamoDB. + """ + from boto.dynamodb.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_swf(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.swf.layer1.Layer1` + :return: A connection to the Layer1 interface for SWF. + """ + from boto.swf.layer1 import Layer1 + return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_cloudsearch(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearch.layer2.Layer2` + :return: A connection to Amazon's CloudSearch service + """ + from boto.cloudsearch.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + **kwargs) + + +def connect_cloudsearch2(aws_access_key_id=None, + aws_secret_access_key=None, + sign_request=False, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :type sign_request: bool + :param sign_request: whether or not to sign search and + upload requests + + :rtype: :class:`boto.cloudsearch2.layer2.Layer2` + :return: A connection to Amazon's CloudSearch2 service + """ + from boto.cloudsearch2.layer2 import Layer2 + return Layer2(aws_access_key_id, aws_secret_access_key, + sign_request=sign_request, + **kwargs) + + +def connect_cloudsearchdomain(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudsearchdomain.layer1.CloudSearchDomainConnection` + :return: A connection to Amazon's CloudSearch Domain service + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return CloudSearchDomainConnection(aws_access_key_id, + aws_secret_access_key, **kwargs) + + +def connect_beanstalk(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type 
aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.beanstalk.layer1.Layer1` + :return: A connection to Amazon's Elastic Beanstalk service + """ + from boto.beanstalk.layer1 import Layer1 + return Layer1(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def connect_elastictranscoder(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ets.layer1.ElasticTranscoderConnection` + :return: A connection to Amazon's Elastic Transcoder service + """ + from boto.elastictranscoder.layer1 import ElasticTranscoderConnection + return ElasticTranscoderConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs) + + +def connect_opsworks(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + from boto.opsworks.layer1 import OpsWorksConnection + return OpsWorksConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs) + + +def connect_redshift(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.redshift.layer1.RedshiftConnection` + :return: A connection to Amazon's Redshift service + """ + from boto.redshift.layer1 import RedshiftConnection + return RedshiftConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_support(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.support.layer1.SupportConnection` + :return: A connection to Amazon's Support service + """ + from boto.support.layer1 import SupportConnection + return SupportConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_cloudtrail(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudTrail + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudtrail.layer1.CloudtrailConnection` + :return: A connection to the AWS Cloudtrail service + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + return CloudTrailConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_directconnect(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS DirectConnect + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.directconnect.layer1.DirectConnectConnection` + :return: A connection to the AWS DirectConnect 
service
+    """
+    from boto.directconnect.layer1 import DirectConnectConnection
+    return DirectConnectConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+def connect_kinesis(aws_access_key_id=None,
+                    aws_secret_access_key=None,
+                    **kwargs):
+    """
+    Connect to Amazon Kinesis
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.kinesis.layer1.KinesisConnection`
+    :return: A connection to the Amazon Kinesis service
+    """
+    from boto.kinesis.layer1 import KinesisConnection
+    return KinesisConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+def connect_logs(aws_access_key_id=None,
+                 aws_secret_access_key=None,
+                 **kwargs):
+    """
+    Connect to Amazon CloudWatch Logs
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.logs.layer1.CloudWatchLogsConnection`
+    :return: A connection to the Amazon CloudWatch Logs service
+    """
+    from boto.logs.layer1 import CloudWatchLogsConnection
+    return CloudWatchLogsConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_route53domains(aws_access_key_id=None,
+                           aws_secret_access_key=None,
+                           **kwargs):
+    """
+    Connect to Amazon Route 53 Domains
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.route53.domains.layer1.Route53DomainsConnection`
+    :return: A connection to the Amazon Route 53 Domains service
+    """
+    from boto.route53.domains.layer1 import Route53DomainsConnection
+    return Route53DomainsConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_cognito_identity(aws_access_key_id=None,
+                             aws_secret_access_key=None,
+                             **kwargs):
+    """
+    Connect to Amazon Cognito Identity
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cognito.identity.layer1.CognitoIdentityConnection`
+    :return: A connection to the Amazon Cognito Identity service
+    """
+    from boto.cognito.identity.layer1 import CognitoIdentityConnection
+    return CognitoIdentityConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_cognito_sync(aws_access_key_id=None,
+                         aws_secret_access_key=None,
+                         **kwargs):
+    """
+    Connect to Amazon Cognito Sync
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cognito.sync.layer1.CognitoSyncConnection`
+    :return: A connection to the Amazon Cognito Sync service
+    """
+    from boto.cognito.sync.layer1 import CognitoSyncConnection
+    return CognitoSyncConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def connect_kms(aws_access_key_id=None,
+                aws_secret_access_key=None,
+                **kwargs):
""" + Connect to AWS Key Management Service + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.kms.layer1.KMSConnection` + :return: A connection to the AWS Key Management Service + """ + from boto.kms.layer1 import KMSConnection + return KMSConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_awslambda(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Lambda + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.awslambda.layer1.AWSLambdaConnection` + :return: A connection to the AWS Lambda service + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return AWSLambdaConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_codedeploy(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CodeDeploy + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.cognito.sync.layer1.CodeDeployConnection` + :return: A connection to the AWS CodeDeploy service + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return CodeDeployConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_configservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS Config + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.kms.layer1.ConfigServiceConnection` + :return: A connection to the AWS Config service + """ + from boto.configservice.layer1 import ConfigServiceConnection + return ConfigServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_cloudhsm(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudHSM + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + rtype: :class:`boto.cloudhsm.layer1.CloudHSMConnection` + :return: A connection to the AWS CloudHSM service + """ + from boto.cloudhsm.layer1 import CloudHSMConnection + return CloudHSMConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def connect_ec2containerservice(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to Amazon EC2 Container Service + rtype: :class:`boto.ec2containerservice.layer1.EC2ContainerServiceConnection` + :return: A connection to the Amazon EC2 Container Service + """ + from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection + return EC2ContainerServiceConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + +def 
+def connect_machinelearning(aws_access_key_id=None,
+                            aws_secret_access_key=None,
+                            **kwargs):
+    """
+    Connect to Amazon Machine Learning service
+    :rtype: :class:`boto.machinelearning.layer1.MachineLearningConnection`
+    :return: A connection to the Amazon Machine Learning service
+    """
+    from boto.machinelearning.layer1 import MachineLearningConnection
+    return MachineLearningConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
+
+def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
+                bucket_storage_uri_class=BucketStorageUri,
+                suppress_consec_slashes=True, is_latest=False):
+    """
+    Instantiate a StorageUri from a URI string.
+
+    :type uri_str: string
+    :param uri_str: URI naming bucket + optional object.
+    :type default_scheme: string
+    :param default_scheme: default scheme for scheme-less URIs.
+    :type debug: int
+    :param debug: debug level to pass in to boto connection (range 0..2).
+    :type validate: bool
+    :param validate: whether to check for bucket name validity.
+    :type bucket_storage_uri_class: BucketStorageUri interface.
+    :param bucket_storage_uri_class: Allows mocking for unit tests.
+    :param suppress_consec_slashes: If provided, controls whether
+        consecutive slashes will be suppressed in key paths.
+    :type is_latest: bool
+    :param is_latest: whether this versioned object represents the
+        current version.
+
+    We allow validate to be disabled to allow caller
+    to implement bucket-level wildcarding (outside the boto library;
+    see gsutil).
+
+    :rtype: :class:`boto.StorageUri` subclass
+    :return: StorageUri subclass for given URI.
+
+    ``uri_str`` must be one of the following formats:
+
+    * gs://bucket/name
+    * gs://bucket/name#ver
+    * s3://bucket/name
+    * gs://bucket
+    * s3://bucket
+    * filename (which could be a Unix path like /a/b/c or a Windows path like
+      C:\a\b\c)
+
+    The last example uses the default scheme ('file', unless overridden).
+    """
+    version_id = None
+    generation = None
+
+    # Manually parse URI components instead of using urlparse because
+    # what we're calling URIs don't really fit the standard syntax for URIs
+    # (the latter includes an optional host/net location part).
+    end_scheme_idx = uri_str.find('://')
+    if end_scheme_idx == -1:
+        scheme = default_scheme.lower()
+        path = uri_str
+    else:
+        scheme = uri_str[0:end_scheme_idx].lower()
+        path = uri_str[end_scheme_idx + 3:]
+
+    if scheme not in ['file', 's3', 'gs']:
+        raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
+    if scheme == 'file':
+        # For file URIs we have no bucket name, and use the complete path
+        # (minus 'file://') as the object name.
+        is_stream = False
+        if path == '-':
+            is_stream = True
+        return FileStorageUri(path, debug, is_stream)
+    else:
+        path_parts = path.split('/', 1)
+        bucket_name = path_parts[0]
+        object_name = ''
+        # If validate enabled, ensure the bucket name is valid, to avoid
+        # possibly confusing other parts of the code. (For example if we didn't
+        # catch bucket names containing ':', when a user tried to connect to
+        # the server with that name they might get a confusing error about
+        # non-integer port numbers.)
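+        # Illustrative editorial note (not part of upstream boto): at this
+        # point 'gs://my-bucket/path/to/obj' has been split into
+        # bucket_name == 'my-bucket' and path_parts == ['my-bucket',
+        # 'path/to/obj']; the object name is taken from path_parts[1]
+        # further below.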
+ if (validate and bucket_name and + (not BUCKET_NAME_RE.match(bucket_name) + or TOO_LONG_DNS_NAME_COMP.search(bucket_name))): + raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str) + if scheme == 'gs': + match = GENERATION_RE.search(path) + if match: + md = match.groupdict() + versionless_uri_str = md['versionless_uri_str'] + path_parts = versionless_uri_str.split('/', 1) + generation = int(md['generation']) + elif scheme == 's3': + match = VERSION_RE.search(path) + if match: + md = match.groupdict() + versionless_uri_str = md['versionless_uri_str'] + path_parts = versionless_uri_str.split('/', 1) + version_id = md['version_id'] + else: + raise InvalidUriError('Unrecognized scheme "%s"' % scheme) + if len(path_parts) > 1: + object_name = path_parts[1] + return bucket_storage_uri_class( + scheme, bucket_name, object_name, debug, + suppress_consec_slashes=suppress_consec_slashes, + version_id=version_id, generation=generation, is_latest=is_latest) + + +def storage_uri_for_key(key): + """Returns a StorageUri for the given key. + + :type key: :class:`boto.s3.key.Key` or subclass + :param key: URI naming bucket + optional object. + """ + if not isinstance(key, boto.s3.key.Key): + raise InvalidUriError('Requested key (%s) is not a subclass of ' + 'boto.s3.key.Key' % str(type(key))) + prov_name = key.bucket.connection.provider.get_provider_name() + uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name) + return storage_uri(uri_str) + +boto.plugin.load_plugins(config) diff --git a/ext/boto/auth.py b/ext/boto/auth.py new file mode 100644 index 0000000000..b479d12674 --- /dev/null +++ b/ext/boto/auth.py @@ -0,0 +1,1099 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +""" +Handles authentication required to AWS and GS +""" + +import base64 +import boto +import boto.auth_handler +import boto.exception +import boto.plugin +import boto.utils +import copy +import datetime +from email.utils import formatdate +import hmac +import os +import posixpath + +from boto.compat import urllib, encodebytes, parse_qs_safe, urlparse +from boto.auth_handler import AuthHandler +from boto.exception import BotoClientError + +try: + from hashlib import sha1 as sha + from hashlib import sha256 as sha256 +except ImportError: + import sha + sha256 = None + + +# Region detection strings to determine if SigV2 should be used +# by default +S3_AUTH_DETECT = [ + '-ap-northeast-1', + '.ap-northeast-1', + '-ap-southeast-1', + '.ap-southeast-1', + '-ap-southeast-2', + '.ap-southeast-2', + '-eu-west-1', + '.eu-west-1', + '-external-1', + '.external-1', + '-sa-east-1', + '.sa-east-1', + '-us-east-1', + '.us-east-1', + '-us-gov-west-1', + '.us-gov-west-1', + '-us-west-1', + '.us-west-1', + '-us-west-2', + '.us-west-2' +] + + +SIGV4_DETECT = [ + '.cn-', + # In eu-central and ap-northeast-2 we support both host styles for S3 + '.eu-central', + '-eu-central', + '.ap-northeast-2', + '-ap-northeast-2', + '.ap-south-1', + '-ap-south-1', + '.us-east-2', + '-us-east-2', + '-ca-central', + '.ca-central', + '.eu-west-2', + '-eu-west-2', +] + + +class HmacKeys(object): + """Key based Auth handler helper.""" + + def __init__(self, host, config, provider): + if provider.access_key is None or provider.secret_key is None: + raise boto.auth_handler.NotReadyToAuthenticate() + self.host = host + self.update_provider(provider) + + def update_provider(self, provider): + self._provider = provider + self._hmac = hmac.new(self._provider.secret_key.encode('utf-8'), + digestmod=sha) + if sha256: + self._hmac_256 = hmac.new(self._provider.secret_key.encode('utf-8'), + digestmod=sha256) + else: + self._hmac_256 = None + + def algorithm(self): + if self._hmac_256: + return 'HmacSHA256' + else: + return 'HmacSHA1' + + def _get_hmac(self): + if self._hmac_256: + digestmod = sha256 + else: + digestmod = sha + return hmac.new(self._provider.secret_key.encode('utf-8'), + digestmod=digestmod) + + def sign_string(self, string_to_sign): + new_hmac = self._get_hmac() + new_hmac.update(string_to_sign.encode('utf-8')) + return encodebytes(new_hmac.digest()).decode('utf-8').strip() + + def __getstate__(self): + pickled_dict = copy.copy(self.__dict__) + del pickled_dict['_hmac'] + del pickled_dict['_hmac_256'] + return pickled_dict + + def __setstate__(self, dct): + self.__dict__ = dct + self.update_provider(self._provider) + + +class AnonAuthHandler(AuthHandler, HmacKeys): + """ + Implements Anonymous requests. 
+ """ + + capability = ['anon'] + + def __init__(self, host, config, provider): + super(AnonAuthHandler, self).__init__(host, config, provider) + + def add_auth(self, http_request, **kwargs): + pass + + +class HmacAuthV1Handler(AuthHandler, HmacKeys): + """ Implements the HMAC request signing used by S3 and GS.""" + + capability = ['hmac-v1', 's3'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + self._hmac_256 = None + + def update_provider(self, provider): + super(HmacAuthV1Handler, self).update_provider(provider) + self._hmac_256 = None + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + method = http_request.method + auth_path = http_request.auth_path + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + string_to_sign = boto.utils.canonical_string(method, auth_path, + headers, None, + self._provider) + boto.log.debug('StringToSign:\n%s' % string_to_sign) + b64_hmac = self.sign_string(string_to_sign) + auth_hdr = self._provider.auth_header + auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac)) + boto.log.debug('Signature:\n%s' % auth) + headers['Authorization'] = auth + + +class HmacAuthV2Handler(AuthHandler, HmacKeys): + """ + Implements the simplified HMAC authorization used by CloudFront. + """ + capability = ['hmac-v2', 'cloudfront'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + self._hmac_256 = None + + def update_provider(self, provider): + super(HmacAuthV2Handler, self).update_provider(provider) + self._hmac_256 = None + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + + b64_hmac = self.sign_string(headers['Date']) + auth_hdr = self._provider.auth_header + headers['Authorization'] = ("%s %s:%s" % + (auth_hdr, + self._provider.access_key, b64_hmac)) + + +class HmacAuthV3Handler(AuthHandler, HmacKeys): + """Implements the new Version 3 HMAC authorization used by Route53.""" + + capability = ['hmac-v3', 'route53', 'ses'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + + def add_auth(self, http_request, **kwargs): + headers = http_request.headers + if 'Date' not in headers: + headers['Date'] = formatdate(usegmt=True) + + if self._provider.security_token: + key = self._provider.security_token_header + headers[key] = self._provider.security_token + + b64_hmac = self.sign_string(headers['Date']) + s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key + s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac) + headers['X-Amzn-Authorization'] = s + + +class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys): + """ + Implements the new Version 3 HMAC authorization used by DynamoDB. 
+ """ + + capability = ['hmac-v3-http'] + + def __init__(self, host, config, provider): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. + """ + headers_to_sign = {'Host': self.host} + for name, value in http_request.headers.items(): + lname = name.lower() + if lname.startswith('x-amz'): + headers_to_sign[name] = value + return headers_to_sign + + def canonical_headers(self, headers_to_sign): + """ + Return the headers that need to be included in the StringToSign + in their canonical form by converting all header keys to lower + case, sorting them in alphabetical order and then joining + them into a string, separated by newlines. + """ + l = sorted(['%s:%s' % (n.lower().strip(), + headers_to_sign[n].strip()) for n in headers_to_sign]) + return '\n'.join(l) + + def string_to_sign(self, http_request): + """ + Return the canonical StringToSign as well as a dict + containing the original version of all headers that + were included in the StringToSign. + """ + headers_to_sign = self.headers_to_sign(http_request) + canonical_headers = self.canonical_headers(headers_to_sign) + string_to_sign = '\n'.join([http_request.method, + http_request.auth_path, + '', + canonical_headers, + '', + http_request.body]) + return string_to_sign, headers_to_sign + + def add_auth(self, req, **kwargs): + """ + Add AWS3 authentication to a request. + + :type req: :class`boto.connection.HTTPRequest` + :param req: The HTTPRequest object. + """ + # This could be a retry. Make sure the previous + # authorization header is removed first. + if 'X-Amzn-Authorization' in req.headers: + del req.headers['X-Amzn-Authorization'] + req.headers['X-Amz-Date'] = formatdate(usegmt=True) + if self._provider.security_token: + req.headers['X-Amz-Security-Token'] = self._provider.security_token + string_to_sign, headers_to_sign = self.string_to_sign(req) + boto.log.debug('StringToSign:\n%s' % string_to_sign) + hash_value = sha256(string_to_sign.encode('utf-8')).digest() + b64_hmac = self.sign_string(hash_value) + s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key + s += "Algorithm=%s," % self.algorithm() + s += "SignedHeaders=%s," % ';'.join(headers_to_sign) + s += "Signature=%s" % b64_hmac + req.headers['X-Amzn-Authorization'] = s + + +class HmacAuthV4Handler(AuthHandler, HmacKeys): + """ + Implements the new Version 4 HMAC authorization. + """ + + capability = ['hmac-v4'] + + def __init__(self, host, config, provider, + service_name=None, region_name=None): + AuthHandler.__init__(self, host, config, provider) + HmacKeys.__init__(self, host, config, provider) + # You can set the service_name and region_name to override the + # values which would otherwise come from the endpoint, e.g. + # ..amazonaws.com. + self.service_name = service_name + self.region_name = region_name + + def _sign(self, key, msg, hex=False): + if not isinstance(key, bytes): + key = key.encode('utf-8') + + if hex: + sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest() + else: + sig = hmac.new(key, msg.encode('utf-8'), sha256).digest() + return sig + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. 
+ """ + host_header_value = self.host_header(self.host, http_request) + if http_request.headers.get('Host'): + host_header_value = http_request.headers['Host'] + headers_to_sign = {'Host': host_header_value} + for name, value in http_request.headers.items(): + lname = name.lower() + if lname.startswith('x-amz'): + if isinstance(value, bytes): + value = value.decode('utf-8') + headers_to_sign[name] = value + return headers_to_sign + + def host_header(self, host, http_request): + port = http_request.port + secure = http_request.protocol == 'https' + if ((port == 80 and not secure) or (port == 443 and secure)): + return host + return '%s:%s' % (host, port) + + def query_string(self, http_request): + parameter_names = sorted(http_request.params.keys()) + pairs = [] + for pname in parameter_names: + pval = boto.utils.get_utf8_value(http_request.params[pname]) + pairs.append(urllib.parse.quote(pname, safe='') + '=' + + urllib.parse.quote(pval, safe='-_~')) + return '&'.join(pairs) + + def canonical_query_string(self, http_request): + # POST requests pass parameters in through the + # http_request.body field. + if http_request.method == 'POST': + return "" + l = [] + for param in sorted(http_request.params): + value = boto.utils.get_utf8_value(http_request.params[param]) + l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'), + urllib.parse.quote(value, safe='-_.~'))) + return '&'.join(l) + + def canonical_headers(self, headers_to_sign): + """ + Return the headers that need to be included in the StringToSign + in their canonical form by converting all header keys to lower + case, sorting them in alphabetical order and then joining + them into a string, separated by newlines. + """ + canonical = [] + + for header in headers_to_sign: + c_name = header.lower().strip() + raw_value = str(headers_to_sign[header]) + if '"' in raw_value: + c_value = raw_value.strip() + else: + c_value = ' '.join(raw_value.strip().split()) + canonical.append('%s:%s' % (c_name, c_value)) + return '\n'.join(sorted(canonical)) + + def signed_headers(self, headers_to_sign): + l = ['%s' % n.lower().strip() for n in headers_to_sign] + l = sorted(l) + return ';'.join(l) + + def canonical_uri(self, http_request): + path = http_request.auth_path + # Normalize the path + # in windows normpath('/') will be '\\' so we chane it back to '/' + normalized = posixpath.normpath(path).replace('\\', '/') + # Then urlencode whatever's left. + encoded = urllib.parse.quote(normalized) + if len(path) > 1 and path.endswith('/'): + encoded += '/' + return encoded + + def payload(self, http_request): + body = http_request.body + # If the body is a file like object, we can use + # boto.utils.compute_hash, which will avoid reading + # the entire body into memory. 
+ if hasattr(body, 'seek') and hasattr(body, 'read'): + return boto.utils.compute_hash(body, hash_algorithm=sha256)[0] + elif not isinstance(body, bytes): + body = body.encode('utf-8') + return sha256(body).hexdigest() + + def canonical_request(self, http_request): + cr = [http_request.method.upper()] + cr.append(self.canonical_uri(http_request)) + cr.append(self.canonical_query_string(http_request)) + headers_to_sign = self.headers_to_sign(http_request) + cr.append(self.canonical_headers(headers_to_sign) + '\n') + cr.append(self.signed_headers(headers_to_sign)) + cr.append(self.payload(http_request)) + return '\n'.join(cr) + + def scope(self, http_request): + scope = [self._provider.access_key] + scope.append(http_request.timestamp) + scope.append(http_request.region_name) + scope.append(http_request.service_name) + scope.append('aws4_request') + return '/'.join(scope) + + def split_host_parts(self, host): + return host.split('.') + + def determine_region_name(self, host): + parts = self.split_host_parts(host) + if self.region_name is not None: + region_name = self.region_name + elif len(parts) > 1: + if parts[1] == 'us-gov': + region_name = 'us-gov-west-1' + else: + if len(parts) == 3: + region_name = 'us-east-1' + else: + region_name = parts[1] + else: + region_name = parts[0] + + return region_name + + def determine_service_name(self, host): + parts = self.split_host_parts(host) + if self.service_name is not None: + service_name = self.service_name + else: + service_name = parts[0] + return service_name + + def credential_scope(self, http_request): + scope = [] + http_request.timestamp = http_request.headers['X-Amz-Date'][0:8] + scope.append(http_request.timestamp) + # The service_name and region_name either come from: + # * The service_name/region_name attrs or (if these values are None) + # * parsed from the endpoint ..amazonaws.com. + region_name = self.determine_region_name(http_request.host) + service_name = self.determine_service_name(http_request.host) + http_request.service_name = service_name + http_request.region_name = region_name + + scope.append(http_request.region_name) + scope.append(http_request.service_name) + scope.append('aws4_request') + return '/'.join(scope) + + def string_to_sign(self, http_request, canonical_request): + """ + Return the canonical StringToSign as well as a dict + containing the original version of all headers that + were included in the StringToSign. + """ + sts = ['AWS4-HMAC-SHA256'] + sts.append(http_request.headers['X-Amz-Date']) + sts.append(self.credential_scope(http_request)) + sts.append(sha256(canonical_request.encode('utf-8')).hexdigest()) + return '\n'.join(sts) + + def signature(self, http_request, string_to_sign): + key = self._provider.secret_key + k_date = self._sign(('AWS4' + key).encode('utf-8'), + http_request.timestamp) + k_region = self._sign(k_date, http_request.region_name) + k_service = self._sign(k_region, http_request.service_name) + k_signing = self._sign(k_service, 'aws4_request') + return self._sign(k_signing, string_to_sign, hex=True) + + def add_auth(self, req, **kwargs): + """ + Add AWS4 authentication to a request. + + :type req: :class`boto.connection.HTTPRequest` + :param req: The HTTPRequest object. + """ + # This could be a retry. Make sure the previous + # authorization header is removed first. 
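+        # (Editorial summary, not part of upstream boto: SigV4 builds the
+        # canonical request, folds it into a string-to-sign, and derives the
+        # signing key through the HMAC chain 'AWS4' + secret -> date ->
+        # region -> service -> 'aws4_request', as implemented in signature()
+        # above.)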
+ if 'X-Amzn-Authorization' in req.headers: + del req.headers['X-Amzn-Authorization'] + now = datetime.datetime.utcnow() + req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ') + if self._provider.security_token: + req.headers['X-Amz-Security-Token'] = self._provider.security_token + qs = self.query_string(req) + + qs_to_post = qs + + # We do not want to include any params that were mangled into + # the params if performing s3-sigv4 since it does not + # belong in the body of a post for some requests. Mangled + # refers to items in the query string URL being added to the + # http response params. However, these params get added to + # the body of the request, but the query string URL does not + # belong in the body of the request. ``unmangled_resp`` is the + # response that happened prior to the mangling. This ``unmangled_req`` + # kwarg will only appear for s3-sigv4. + if 'unmangled_req' in kwargs: + qs_to_post = self.query_string(kwargs['unmangled_req']) + + if qs_to_post and req.method == 'POST': + # Stash request parameters into post body + # before we generate the signature. + req.body = qs_to_post + req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' + req.headers['Content-Length'] = str(len(req.body)) + else: + # Safe to modify req.path here since + # the signature will use req.auth_path. + req.path = req.path.split('?')[0] + + if qs: + # Don't insert the '?' unless there's actually a query string + req.path = req.path + '?' + qs + canonical_request = self.canonical_request(req) + boto.log.debug('CanonicalRequest:\n%s' % canonical_request) + string_to_sign = self.string_to_sign(req, canonical_request) + boto.log.debug('StringToSign:\n%s' % string_to_sign) + signature = self.signature(req, string_to_sign) + boto.log.debug('Signature:\n%s' % signature) + headers_to_sign = self.headers_to_sign(req) + l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)] + l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign)) + l.append('Signature=%s' % signature) + req.headers['Authorization'] = ','.join(l) + + +class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler): + """ + Implements a variant of Version 4 HMAC authorization specific to S3. + """ + capability = ['hmac-v4-s3'] + + def __init__(self, *args, **kwargs): + super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs) + + if self.region_name: + self.region_name = self.clean_region_name(self.region_name) + + def clean_region_name(self, region_name): + if region_name.startswith('s3-'): + return region_name[3:] + + return region_name + + def canonical_uri(self, http_request): + # S3 does **NOT** do path normalization that SigV4 typically does. + # Urlencode the path, **NOT** ``auth_path`` (because vhosting). + path = urllib.parse.urlparse(http_request.path) + # Because some quoting may have already been applied, let's back it out. + unquoted = urllib.parse.unquote(path.path) + # Requote, this time addressing all characters. + encoded = urllib.parse.quote(unquoted, safe='/~') + return encoded + + def canonical_query_string(self, http_request): + # Note that we just do not return an empty string for + # POST request. Query strings in url are included in canonical + # query string. 
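+        # (Illustrative editorial note: params such as
+        # {'prefix': 'a b', 'max-keys': '10'} canonicalize to
+        # 'max-keys=10&prefix=a%20b' -- keys sorted, values percent-encoded
+        # with '-_.~' left unescaped.)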
+ l = [] + for param in sorted(http_request.params): + value = boto.utils.get_utf8_value(http_request.params[param]) + l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'), + urllib.parse.quote(value, safe='-_.~'))) + return '&'.join(l) + + def host_header(self, host, http_request): + port = http_request.port + secure = http_request.protocol == 'https' + if ((port == 80 and not secure) or (port == 443 and secure)): + return http_request.host + return '%s:%s' % (http_request.host, port) + + def headers_to_sign(self, http_request): + """ + Select the headers from the request that need to be included + in the StringToSign. + """ + host_header_value = self.host_header(self.host, http_request) + headers_to_sign = {'Host': host_header_value} + for name, value in http_request.headers.items(): + lname = name.lower() + # Hooray for the only difference! The main SigV4 signer only does + # ``Host`` + ``x-amz-*``. But S3 wants pretty much everything + # signed, except for authorization itself. + if lname not in ['authorization']: + headers_to_sign[name] = value + return headers_to_sign + + def determine_region_name(self, host): + # S3's different format(s) of representing region/service from the + # rest of AWS makes this hurt too. + # + # Possible domain formats: + # - s3.amazonaws.com (Classic) + # - s3-us-west-2.amazonaws.com (Specific region) + # - bukkit.s3.amazonaws.com (Vhosted Classic) + # - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region) + # - s3.cn-north-1.amazonaws.com.cn - (Beijing region) + # - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Beijing region) + parts = self.split_host_parts(host) + + if self.region_name is not None: + region_name = self.region_name + else: + # Classic URLs - s3-us-west-2.amazonaws.com + if len(parts) == 3: + region_name = self.clean_region_name(parts[0]) + + # Special-case for Classic. + if region_name == 's3': + region_name = 'us-east-1' + else: + # Iterate over the parts in reverse order. + for offset, part in enumerate(reversed(parts)): + part = part.lower() + + # Look for the first thing starting with 's3'. + # Until there's a ``.s3`` TLD, we should be OK. :P + if part == 's3': + # If it's by itself, the region is the previous part. + region_name = parts[-offset] + + # Unless it's Vhosted classic + if region_name == 'amazonaws': + region_name = 'us-east-1' + + break + elif part.startswith('s3-'): + region_name = self.clean_region_name(part) + break + + return region_name + + def determine_service_name(self, host): + # Should this signing mechanism ever be used for anything else, this + # will fail. Consider utilizing the logic from the parent class should + # you find yourself here. + return 's3' + + def mangle_path_and_params(self, req): + """ + Returns a copy of the request object with fixed ``auth_path/params`` + attributes from the original. + """ + modified_req = copy.copy(req) + + # Unlike the most other services, in S3, ``req.params`` isn't the only + # source of query string parameters. + # Because of the ``query_args``, we may already have a query string + # **ON** the ``path/auth_path``. + # Rip them apart, so the ``auth_path/params`` can be signed + # appropriately. + parsed_path = urllib.parse.urlparse(modified_req.auth_path) + modified_req.auth_path = parsed_path.path + + if modified_req.params is None: + modified_req.params = {} + else: + # To keep the original request object untouched. We must make + # a copy of the params dictionary. 
Because the copy of the + # original request directly refers to the params dictionary + # of the original request. + copy_params = req.params.copy() + modified_req.params = copy_params + + raw_qs = parsed_path.query + existing_qs = parse_qs_safe( + raw_qs, + keep_blank_values=True + ) + + # ``parse_qs`` will return lists. Don't do that unless there's a real, + # live list provided. + for key, value in existing_qs.items(): + if isinstance(value, (list, tuple)): + if len(value) == 1: + existing_qs[key] = value[0] + + modified_req.params.update(existing_qs) + return modified_req + + def payload(self, http_request): + if http_request.headers.get('x-amz-content-sha256'): + return http_request.headers['x-amz-content-sha256'] + + return super(S3HmacAuthV4Handler, self).payload(http_request) + + def add_auth(self, req, **kwargs): + if 'x-amz-content-sha256' not in req.headers: + if '_sha256' in req.headers: + req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256') + else: + req.headers['x-amz-content-sha256'] = self.payload(req) + updated_req = self.mangle_path_and_params(req) + return super(S3HmacAuthV4Handler, self).add_auth(updated_req, + unmangled_req=req, + **kwargs) + + def presign(self, req, expires, iso_date=None): + """ + Presign a request using SigV4 query params. Takes in an HTTP request + and an expiration time in seconds and returns a URL. + + http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html + """ + if iso_date is None: + iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ') + + region = self.determine_region_name(req.host) + service = self.determine_service_name(req.host) + + params = { + 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256', + 'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % ( + self._provider.access_key, + iso_date[:8], + region, + service + ), + 'X-Amz-Date': iso_date, + 'X-Amz-Expires': expires, + 'X-Amz-SignedHeaders': 'host' + } + + if self._provider.security_token: + params['X-Amz-Security-Token'] = self._provider.security_token + + headers_to_sign = self.headers_to_sign(req) + l = sorted(['%s' % n.lower().strip() for n in headers_to_sign]) + params['X-Amz-SignedHeaders'] = ';'.join(l) + + req.params.update(params) + + cr = self.canonical_request(req) + + # We need to replace the payload SHA with a constant + cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD' + + # Date header is expected for string_to_sign, but unused otherwise + req.headers['X-Amz-Date'] = iso_date + + sts = self.string_to_sign(req, cr) + signature = self.signature(req, sts) + + # Add signature to params now that we have it + req.params['X-Amz-Signature'] = signature + + return '%s://%s%s?%s' % (req.protocol, req.host, req.path, + urllib.parse.urlencode(req.params)) + + +class STSAnonHandler(AuthHandler): + """ + Provides pure query construction (no actual signing). + + Used for making anonymous STS request for operations like + ``assume_role_with_web_identity``. + """ + + capability = ['sts-anon'] + + def _escape_value(self, value): + # This is changed from a previous version because this string is + # being passed to the query string and query strings must + # be url encoded. In particular STS requires the saml_response to + # be urlencoded when calling assume_role_with_saml. 
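+        # (Illustrative editorial note: quote() maps e.g. 'a+b c' to
+        # 'a%2Bb%20c'; '/' stays unescaped under its default safe set.)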
+        return urllib.parse.quote(value)
+
+    def _build_query_string(self, params):
+        keys = list(params.keys())
+        keys.sort(key=lambda x: x.lower())
+        pairs = []
+        for key in keys:
+            val = boto.utils.get_utf8_value(params[key])
+            pairs.append(key + '=' + self._escape_value(val.decode('utf-8')))
+        return '&'.join(pairs)
+
+    def add_auth(self, http_request, **kwargs):
+        headers = http_request.headers
+        qs = self._build_query_string(
+            http_request.params
+        )
+        boto.log.debug('query_string in body: %s' % qs)
+        headers['Content-Type'] = 'application/x-www-form-urlencoded'
+        # This will be a POST so the query string should go into the body
+        # as opposed to being in the uri
+        http_request.body = qs
+
+
+class QuerySignatureHelper(HmacKeys):
+    """
+    Helper for Query signature based Auth handler.
+
+    Concrete subclasses need to implement the _calc_signature method.
+    """
+
+    def add_auth(self, http_request, **kwargs):
+        headers = http_request.headers
+        params = http_request.params
+        params['AWSAccessKeyId'] = self._provider.access_key
+        params['SignatureVersion'] = self.SignatureVersion
+        params['Timestamp'] = boto.utils.get_ts()
+        qs, signature = self._calc_signature(
+            http_request.params, http_request.method,
+            http_request.auth_path, http_request.host)
+        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
+        if http_request.method == 'POST':
+            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+            http_request.body = qs + '&Signature=' + urllib.parse.quote_plus(signature)
+            http_request.headers['Content-Length'] = str(len(http_request.body))
+        else:
+            http_request.body = ''
+            # if this is a retried request, the qs from the previous try will
+            # already be there, we need to get rid of that and rebuild it
+            http_request.path = http_request.path.split('?')[0]
+            http_request.path = (http_request.path + '?' + qs +
+                                 '&Signature=' + urllib.parse.quote_plus(signature))
+
+
+class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
+    """Provides Signature V0 Signing"""
+
+    SignatureVersion = 0
+    capability = ['sign-v0']
+
+    def _calc_signature(self, params, *args):
+        boto.log.debug('using _calc_signature_0')
+        hmac = self._get_hmac()
+        s = params['Action'] + params['Timestamp']
+        hmac.update(s.encode('utf-8'))
+        keys = params.keys()
+        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
+        pairs = []
+        for key in keys:
+            val = boto.utils.get_utf8_value(params[key])
+            pairs.append(key + '=' + urllib.parse.quote(val))
+        qs = '&'.join(pairs)
+        return (qs, base64.b64encode(hmac.digest()))
+
+
+class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
+    """
+    Provides Query Signature V1 Authentication.
+ """ + + SignatureVersion = 1 + capability = ['sign-v1', 'mturk'] + + def __init__(self, *args, **kw): + QuerySignatureHelper.__init__(self, *args, **kw) + AuthHandler.__init__(self, *args, **kw) + self._hmac_256 = None + + def _calc_signature(self, params, *args): + boto.log.debug('using _calc_signature_1') + hmac = self._get_hmac() + keys = list(params.keys()) + keys.sort(key=lambda x: x.lower()) + pairs = [] + for key in keys: + hmac.update(key.encode('utf-8')) + val = boto.utils.get_utf8_value(params[key]) + hmac.update(val) + pairs.append(key + '=' + urllib.parse.quote(val)) + qs = '&'.join(pairs) + return (qs, base64.b64encode(hmac.digest())) + + +class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler): + """Provides Query Signature V2 Authentication.""" + + SignatureVersion = 2 + capability = ['sign-v2', 'ec2', 'ec2', 'emr', 'fps', 'ecs', + 'sdb', 'iam', 'rds', 'sns', 'sqs', 'cloudformation'] + + def _calc_signature(self, params, verb, path, server_name): + boto.log.debug('using _calc_signature_2') + string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path) + hmac = self._get_hmac() + params['SignatureMethod'] = self.algorithm() + if self._provider.security_token: + params['SecurityToken'] = self._provider.security_token + keys = sorted(params.keys()) + pairs = [] + for key in keys: + val = boto.utils.get_utf8_value(params[key]) + pairs.append(urllib.parse.quote(key, safe='') + '=' + + urllib.parse.quote(val, safe='-_~')) + qs = '&'.join(pairs) + boto.log.debug('query string: %s' % qs) + string_to_sign += qs + boto.log.debug('string_to_sign: %s' % string_to_sign) + hmac.update(string_to_sign.encode('utf-8')) + b64 = base64.b64encode(hmac.digest()) + boto.log.debug('len(b64)=%d' % len(b64)) + boto.log.debug('base64 encoded digest: %s' % b64) + return (qs, b64) + + +class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler): + """ + Query Signature V2 Authentication relocating signed query + into the path and allowing POST requests with Content-Types. + """ + + capability = ['mws'] + + def add_auth(self, req, **kwargs): + req.params['AWSAccessKeyId'] = self._provider.access_key + req.params['SignatureVersion'] = self.SignatureVersion + req.params['Timestamp'] = boto.utils.get_ts() + qs, signature = self._calc_signature(req.params, req.method, + req.auth_path, req.host) + boto.log.debug('query_string: %s Signature: %s' % (qs, signature)) + if req.method == 'POST': + req.headers['Content-Length'] = str(len(req.body)) + req.headers['Content-Type'] = req.headers.get('Content-Type', + 'text/plain') + else: + req.body = '' + # if this is a retried req, the qs from the previous try will + # already be there, we need to get rid of that and rebuild it + req.path = req.path.split('?')[0] + req.path = (req.path + '?' + qs + + '&Signature=' + urllib.parse.quote_plus(signature)) + + +def get_auth_handler(host, config, provider, requested_capability=None): + """Finds an AuthHandler that is ready to authenticate. + + Lists through all the registered AuthHandlers to find one that is willing + to handle for the requested capabilities, config and provider. + + :type host: string + :param host: The name of the host + + :type config: + :param config: + + :type provider: + :param provider: + + Returns: + An implementation of AuthHandler. 
+
+    Raises:
+        boto.exception.NoAuthHandlerFound
+    """
+    ready_handlers = []
+    auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
+    for handler in auth_handlers:
+        try:
+            ready_handlers.append(handler(host, config, provider))
+        except boto.auth_handler.NotReadyToAuthenticate:
+            pass
+
+    if not ready_handlers:
+        checked_handlers = auth_handlers
+        names = [handler.__name__ for handler in checked_handlers]
+        raise boto.exception.NoAuthHandlerFound(
+            'No handler was ready to authenticate. %d handlers were checked.'
+            ' %s '
+            'Check your credentials' % (len(names), str(names)))
+
+    # We select the last ready auth handler that was loaded, to allow users to
+    # customize how auth works in environments where there are shared boto
+    # config files (e.g., /etc/boto.cfg and ~/.boto): The more general,
+    # system-wide shared configs should be loaded first, and the user's
+    # customizations loaded last. That way, for example, the system-wide
+    # config might include a plugin_directory that includes a service account
+    # auth plugin shared by all users of a Google Compute Engine instance
+    # (allowing sharing of non-user data between various services), and the
+    # user could override this with a .boto config that includes user-specific
+    # credentials (for access to user data).
+    return ready_handlers[-1]
+
+
+def detect_potential_sigv4(func):
+    def _wrapper(self):
+        if os.environ.get('EC2_USE_SIGV4', False):
+            return ['hmac-v4']
+
+        if boto.config.get('ec2', 'use-sigv4', False):
+            return ['hmac-v4']
+
+        if hasattr(self, 'region'):
+            # If you're making changes here, you should also check
+            # ``boto/iam/connection.py``, as several things there are also
+            # endpoint-related.
+            if getattr(self.region, 'endpoint', ''):
+                for test in SIGV4_DETECT:
+                    if test in self.region.endpoint:
+                        return ['hmac-v4']
+
+        return func(self)
+    return _wrapper
+
+
+def detect_potential_s3sigv4(func):
+    def _wrapper(self):
+        if os.environ.get('S3_USE_SIGV4', False):
+            return ['hmac-v4-s3']
+
+        if boto.config.get('s3', 'use-sigv4', False):
+            return ['hmac-v4-s3']
+
+        if not hasattr(self, 'host'):
+            return func(self)
+
+        # Keep the old explicit logic in case somebody was adding to the list.
+        for test in SIGV4_DETECT:
+            if test in self.host:
+                return ['hmac-v4-s3']
+
+        # Use default for non-aws hosts. Adding a url scheme is necessary if
+        # not present for urlparse to properly function.
+        host = self.host
+        if not (self.host.startswith('http://') or
+                self.host.startswith('https://')):
+            host = 'https://' + host
+        netloc = urlparse(host).netloc
+        if not (netloc.endswith('amazonaws.com') or
+                netloc.endswith('amazonaws.com.cn')):
+            return func(self)
+
+        # Use the default for the global endpoint
+        if netloc.endswith('s3.amazonaws.com'):
+            return func(self)
+
+        # Use the default for regions that support sigv4 and sigv2
+        if any(test in self.host for test in S3_AUTH_DETECT):
+            return func(self)
+
+        # Use anonymous if enabled.
+        if hasattr(self, 'anon') and self.anon:
+            return func(self)
+
+        # Default to sigv4 for aws hosts outside of regions that are known
+        # to support sigv2
+        return ['hmac-v4-s3']
+    return _wrapper
diff --git a/ext/boto/auth_handler.py b/ext/boto/auth_handler.py
new file mode 100644
index 0000000000..a8583f8aa3
--- /dev/null
+++ b/ext/boto/auth_handler.py
@@ -0,0 +1,60 @@
+# Copyright 2010 Google Inc.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Defines an interface which all Auth handlers need to implement. +""" + +from boto.plugin import Plugin + + +class NotReadyToAuthenticate(Exception): + pass + + +class AuthHandler(Plugin): + + capability = [] + + def __init__(self, host, config, provider): + """Constructs the handlers. + :type host: string + :param host: The host to which the request is being sent. + + :type config: boto.pyami.Config + :param config: Boto configuration. + + :type provider: boto.provider.Provider + :param provider: Provider details. + + Raises: + NotReadyToAuthenticate: if this handler is not willing to + authenticate for the given provider and config. + """ + pass + + def add_auth(self, http_request): + """Invoked to add authentication details to request. + + :type http_request: boto.connection.HTTPRequest + :param http_request: HTTP request that needs to be authenticated. + """ + pass diff --git a/ext/boto/awslambda/__init__.py b/ext/boto/awslambda/__init__.py new file mode 100644 index 0000000000..8c734284fa --- /dev/null +++ b/ext/boto/awslambda/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Lambda service. 
+ :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.awslambda.layer1 import AWSLambdaConnection + return get_regions('awslambda', + connection_cls=AWSLambdaConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.awslambda.layer1 import AWSLambdaConnection + return connect('awslambda', region_name, + connection_cls=AWSLambdaConnection, **kw_params) diff --git a/ext/boto/awslambda/exceptions.py b/ext/boto/awslambda/exceptions.py new file mode 100644 index 0000000000..7e1515117f --- /dev/null +++ b/ext/boto/awslambda/exceptions.py @@ -0,0 +1,38 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class InvalidRequestContentException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InvalidParameterValueException(BotoServerError): + pass + + +class ServiceException(BotoServerError): + pass diff --git a/ext/boto/awslambda/layer1.py b/ext/boto/awslambda/layer1.py new file mode 100644 index 0000000000..01603f6f02 --- /dev/null +++ b/ext/boto/awslambda/layer1.py @@ -0,0 +1,517 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+import os
+
+from boto.compat import json
+from boto.exception import JSONResponseError
+from boto.connection import AWSAuthConnection
+from boto.regioninfo import RegionInfo
+from boto.awslambda import exceptions
+
+
+class AWSLambdaConnection(AWSAuthConnection):
+    """
+    AWS Lambda
+    **Overview**
+
+    This is the AWS Lambda API Reference. The AWS Lambda Developer
+    Guide provides additional information. For the service overview,
+    go to `What is AWS Lambda`_, and for information about how the
+    service works, go to `AWS Lambda: How it Works`_ in the AWS Lambda
+    Developer Guide.
+    """
+    APIVersion = "2014-11-11"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "lambda.us-east-1.amazonaws.com"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "InvalidRequestContentException": exceptions.InvalidRequestContentException,
+        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+        "InvalidParameterValueException": exceptions.InvalidParameterValueException,
+        "ServiceException": exceptions.ServiceException,
+    }
+
+    def __init__(self, **kwargs):
+        region = kwargs.get('region')
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        else:
+            del kwargs['region']
+        kwargs['host'] = region.endpoint
+        super(AWSLambdaConnection, self).__init__(**kwargs)
+        self.region = region
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def add_event_source(self, event_source, function_name, role,
+                         batch_size=None, parameters=None):
+        """
+        Identifies an Amazon Kinesis stream as the event source for an
+        AWS Lambda function. AWS Lambda invokes the specified function
+        when records are posted to the stream.
+
+        This is the pull model, where AWS Lambda invokes the function.
+        For more information, go to `AWS Lambda: How it Works`_ in the
+        AWS Lambda Developer Guide.
+
+        This association between an Amazon Kinesis stream and an AWS
+        Lambda function is called the event source mapping. You
+        provide the configuration information (for example, which
+        stream to read from and which AWS Lambda function to invoke)
+        for the event source mapping in the request body.
+
+        This operation requires permission for the `iam:PassRole`
+        action for the IAM role. It also requires permission for the
+        `lambda:AddEventSource` action.
+
+        :type event_source: string
+        :param event_source: The Amazon Resource Name (ARN) of the Amazon
+            Kinesis stream that is the event source. Any record added to this
+            stream causes AWS Lambda to invoke your Lambda function. AWS Lambda
+            POSTs the Amazon Kinesis event, containing records, to your Lambda
+            function as JSON.
+
+        :type function_name: string
+        :param function_name: The Lambda function to invoke when AWS Lambda
+            detects an event on the stream.
+
+        :type role: string
+        :param role: The ARN of the IAM role (invocation role) that AWS Lambda
+            can assume to read from the stream and invoke the function.
+
+        :type batch_size: integer
+        :param batch_size: The largest number of records that AWS Lambda will
+            give to your function in a single event. The default is 100
+            records.
+
+        :type parameters: map
+        :param parameters: A map (key-value pairs) defining the configuration
+            for AWS Lambda to use when reading the event source. Currently, AWS
+            Lambda supports only the `InitialPositionInStream` key. The valid
+            values are: "TRIM_HORIZON" and "LATEST". The default value is
+            "TRIM_HORIZON". For more information, go to `ShardIteratorType`_ in
+            the Amazon Kinesis Service API Reference.
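+
+        A minimal usage sketch (the region, ARN, name, and role values
+        below are illustrative, not part of this patch; credentials come
+        from the usual boto configuration)::
+
+            import boto.awslambda
+
+            conn = boto.awslambda.connect_to_region('us-east-1')
+            conn.add_event_source(
+                'arn:aws:kinesis:us-east-1:111122223333:stream/example-stream',
+                'example-function',
+                'arn:aws:iam::111122223333:role/example-invocation-role',
+                batch_size=100,
+                parameters={'InitialPositionInStream': 'TRIM_HORIZON'})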
+
+        """
+
+        uri = '/2014-11-13/event-source-mappings/'
+        params = {
+            'EventSource': event_source,
+            'FunctionName': function_name,
+            'Role': role,
+        }
+        headers = {}
+        query_params = {}
+        if batch_size is not None:
+            params['BatchSize'] = batch_size
+        if parameters is not None:
+            params['Parameters'] = parameters
+        return self.make_request('POST', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def delete_function(self, function_name):
+        """
+        Deletes the specified Lambda function code and configuration.
+
+        This operation requires permission for the
+        `lambda:DeleteFunction` action.
+
+        :type function_name: string
+        :param function_name: The Lambda function to delete.
+
+        """
+
+        uri = '/2014-11-13/functions/{0}'.format(function_name)
+        return self.make_request('DELETE', uri, expected_status=204)
+
+    def get_event_source(self, uuid):
+        """
+        Returns configuration information for the specified event
+        source mapping (see AddEventSource).
+
+        This operation requires permission for the
+        `lambda:GetEventSource` action.
+
+        :type uuid: string
+        :param uuid: The AWS Lambda assigned ID of the event source mapping.
+
+        """
+
+        uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def get_function(self, function_name):
+        """
+        Returns the configuration information of the Lambda function
+        and a presigned URL link to the .zip file you uploaded with
+        UploadFunction so you can download the .zip file. Note that
+        the URL is valid for up to 10 minutes. The configuration
+        information is the same information you provided as parameters
+        when uploading the function.
+
+        This operation requires permission for the
+        `lambda:GetFunction` action.
+
+        :type function_name: string
+        :param function_name: The Lambda function name.
+
+        """
+
+        uri = '/2014-11-13/functions/{0}'.format(function_name)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def get_function_configuration(self, function_name):
+        """
+        Returns the configuration information of the Lambda function.
+        This is the same information you provided as parameters when
+        uploading the function by using UploadFunction.
+
+        This operation requires permission for the
+        `lambda:GetFunctionConfiguration` operation.
+
+        :type function_name: string
+        :param function_name: The name of the Lambda function for which you
+            want to retrieve the configuration information.
+
+        """
+
+        uri = '/2014-11-13/functions/{0}/configuration'.format(function_name)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def invoke_async(self, function_name, invoke_args):
+        """
+        Submits an invocation request to AWS Lambda. Upon receiving
+        the request, Lambda executes the specified function
+        asynchronously. To see the logs generated by the Lambda
+        function execution, see the CloudWatch logs console.
+
+        This operation requires permission for the
+        `lambda:InvokeAsync` action.
+
+        :type function_name: string
+        :param function_name: The Lambda function name.
+
+        :type invoke_args: blob
+        :param invoke_args: JSON that you want to provide to your Lambda
+            function as input.
+
+        """
+        uri = '/2014-11-13/functions/{0}/invoke-async/'.format(function_name)
+        headers = {}
+        query_params = {}
+        try:
+            content_length = str(len(invoke_args))
+        except (TypeError, AttributeError):
+            # If a file like object is provided and seekable, try to retrieve
+            # the file size via fstat.
+ try: + invoke_args.tell() + except (AttributeError, OSError, IOError): + raise TypeError( + "File-like object passed to parameter " + "``invoke_args`` must be seekable." + ) + content_length = str(os.fstat(invoke_args.fileno()).st_size) + headers['Content-Length'] = content_length + return self.make_request('POST', uri, expected_status=202, + data=invoke_args, headers=headers, + params=query_params) + + def list_event_sources(self, event_source_arn=None, function_name=None, + marker=None, max_items=None): + """ + Returns a list of event source mappings. For each mapping, the + API returns configuration information (see AddEventSource). + You can optionally specify filters to retrieve specific event + source mappings. + + This operation requires permission for the + `lambda:ListEventSources` action. + + :type event_source_arn: string + :param event_source_arn: The Amazon Resource Name (ARN) of the Amazon + Kinesis stream. + + :type function_name: string + :param function_name: The name of the AWS Lambda function. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListEventSources` operation. If present, specifies + to continue the list from where the returning call left off. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of + event sources to return in response. This value must be greater + than 0. + + """ + + uri = '/2014-11-13/event-source-mappings/' + params = {} + headers = {} + query_params = {} + if event_source_arn is not None: + query_params['EventSource'] = event_source_arn + if function_name is not None: + query_params['FunctionName'] = function_name + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_functions(self, marker=None, max_items=None): + """ + Returns a list of your Lambda functions. For each function, + the response includes the function configuration information. + You must use GetFunction to retrieve the code for your + function. + + This operation requires permission for the + `lambda:ListFunctions` action. + + :type marker: string + :param marker: Optional string. An opaque pagination token returned + from a previous `ListFunctions` operation. If present, indicates + where to continue the listing. + + :type max_items: integer + :param max_items: Optional integer. Specifies the maximum number of AWS + Lambda functions to return in response. This parameter value must + be greater than 0. + + """ + + uri = '/2014-11-13/functions/' + params = {} + headers = {} + query_params = {} + if marker is not None: + query_params['Marker'] = marker + if max_items is not None: + query_params['MaxItems'] = max_items + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def remove_event_source(self, uuid): + """ + Removes an event source mapping. This means AWS Lambda will no + longer invoke the function for events in the associated + source. + + This operation requires permission for the + `lambda:RemoveEventSource` action. + + :type uuid: string + :param uuid: The event source mapping ID. 
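+
+        A minimal usage sketch (the region and the UUID value are
+        illustrative)::
+
+            import boto.awslambda
+
+            conn = boto.awslambda.connect_to_region('us-east-1')
+            conn.remove_event_source('11111111-2222-3333-4444-555555555555')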
+ + """ + + uri = '/2014-11-13/event-source-mappings/{0}'.format(uuid) + return self.make_request('DELETE', uri, expected_status=204) + + def update_function_configuration(self, function_name, role=None, + handler=None, description=None, + timeout=None, memory_size=None): + """ + Updates the configuration parameters for the specified Lambda + function by using the values provided in the request. You + provide only the parameters you want to change. This operation + must only be used on an existing Lambda function and cannot be + used to update the function's code. + + This operation requires permission for the + `lambda:UpdateFunctionConfiguration` action. + + :type function_name: string + :param function_name: The name of the Lambda function. + + :type role: string + :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda + will assume when it executes your function. + + :type handler: string + :param handler: The function that Lambda calls to begin executing your + function. For Node.js, it is the module-name.export value in your + function. + + :type description: string + :param description: A short user-defined function description. Lambda + does not use this value. Assign a meaningful description as you see + fit. + + :type timeout: integer + :param timeout: The function execution time at which Lambda should + terminate the function. Because the execution time has cost + implications, we recommend you set this value based on your + expected execution time. The default is 3 seconds. + + :type memory_size: integer + :param memory_size: The amount of memory, in MB, your Lambda function + is given. Lambda uses this memory size to infer the amount of CPU + allocated to your function. Your function use-case determines your + CPU and memory requirements. For example, a database operation + might need less memory compared to an image processing function. + The default value is 128 MB. The value must be a multiple of 64 MB. + + """ + + uri = '/2014-11-13/functions/{0}/configuration'.format(function_name) + params = {} + headers = {} + query_params = {} + if role is not None: + query_params['Role'] = role + if handler is not None: + query_params['Handler'] = handler + if description is not None: + query_params['Description'] = description + if timeout is not None: + query_params['Timeout'] = timeout + if memory_size is not None: + query_params['MemorySize'] = memory_size + return self.make_request('PUT', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def upload_function(self, function_name, function_zip, runtime, role, + handler, mode, description=None, timeout=None, + memory_size=None): + """ + Creates a new Lambda function or updates an existing function. + The function metadata is created from the request parameters, + and the code for the function is provided by a .zip file in + the request body. If the function name already exists, the + existing Lambda function is updated with the new code and + metadata. + + This operation requires permission for the + `lambda:UploadFunction` action. + + :type function_name: string + :param function_name: The name you want to assign to the function you + are uploading. The function names appear in the console and are + returned in the ListFunctions API. Function names are used to + specify functions to other AWS Lambda APIs, such as InvokeAsync. + + :type function_zip: blob + :param function_zip: A .zip file containing your packaged source code. 
+            For more information about creating a .zip file, go to `AWS Lambda:
+            How it Works`_ in the AWS Lambda Developer Guide.
+
+        :type runtime: string
+        :param runtime: The runtime environment for the Lambda function you are
+            uploading. Currently, Lambda supports only "nodejs" as the runtime.
+
+        :type role: string
+        :param role: The Amazon Resource Name (ARN) of the IAM role that Lambda
+            assumes when it executes your function to access any other Amazon
+            Web Services (AWS) resources.
+
+        :type handler: string
+        :param handler: The function that Lambda calls to begin execution. For
+            Node.js, it is the module-name.export value in your function.
+
+        :type mode: string
+        :param mode: How the Lambda function will be invoked. Lambda supports
+            only the "event" mode.
+
+        :type description: string
+        :param description: A short, user-defined function description. Lambda
+            does not use this value. Assign a meaningful description as you see
+            fit.
+
+        :type timeout: integer
+        :param timeout: The function execution time at which Lambda should
+            terminate the function. Because the execution time has cost
+            implications, we recommend you set this value based on your
+            expected execution time. The default is 3 seconds.
+
+        :type memory_size: integer
+        :param memory_size: The amount of memory, in MB, your Lambda function
+            is given. Lambda uses this memory size to infer the amount of CPU
+            allocated to your function. Your function use-case determines your
+            CPU and memory requirements. For example, a database operation
+            might need less memory compared to an image processing function.
+            The default value is 128 MB. The value must be a multiple of 64 MB.
+
+        """
+        uri = '/2014-11-13/functions/{0}'.format(function_name)
+        headers = {}
+        query_params = {}
+        if runtime is not None:
+            query_params['Runtime'] = runtime
+        if role is not None:
+            query_params['Role'] = role
+        if handler is not None:
+            query_params['Handler'] = handler
+        if mode is not None:
+            query_params['Mode'] = mode
+        if description is not None:
+            query_params['Description'] = description
+        if timeout is not None:
+            query_params['Timeout'] = timeout
+        if memory_size is not None:
+            query_params['MemorySize'] = memory_size
+
+        try:
+            content_length = str(len(function_zip))
+        except (TypeError, AttributeError):
+            # If a file like object is provided and seekable, try to retrieve
+            # the file size via fstat.
+            try:
+                function_zip.tell()
+            except (AttributeError, OSError, IOError):
+                raise TypeError(
+                    "File-like object passed to parameter "
+                    "``function_zip`` must be seekable."
+                )
+            content_length = str(os.fstat(function_zip.fileno()).st_size)
+        headers['Content-Length'] = content_length
+        return self.make_request('PUT', uri, expected_status=201,
+                                 data=function_zip, headers=headers,
+                                 params=query_params)
+
+    def make_request(self, verb, resource, headers=None, data='',
+                     expected_status=None, params=None):
+        if headers is None:
+            headers = {}
+        response = AWSAuthConnection.make_request(
+            self, verb, resource, headers=headers, data=data, params=params)
+        body = response.read().decode('utf-8')
+        if body:
+            body = json.loads(body)
+        if response.status == expected_status:
+            return body
+        else:
+            error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
+            error_class = self._faults.get(error_type, self.ResponseError)
+            raise error_class(response.status, response.reason, body)
diff --git a/ext/boto/beanstalk/__init__.py b/ext/boto/beanstalk/__init__.py
new file mode 100644
index 0000000000..3667c6d0f5
--- /dev/null
+++ b/ext/boto/beanstalk/__init__.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+from boto.regioninfo import connect
+
+
+def regions():
+    """
+    Get all available regions for the AWS Elastic Beanstalk service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.beanstalk.layer1 import Layer1
+    return get_regions(
+        'elasticbeanstalk',
+        connection_cls=Layer1
+    )
+
+
+def connect_to_region(region_name, **kw_params):
+    from boto.beanstalk.layer1 import Layer1
+    return connect('elasticbeanstalk', region_name, connection_cls=Layer1,
+                   **kw_params)
diff --git a/ext/boto/beanstalk/exception.py b/ext/boto/beanstalk/exception.py
new file mode 100644
index 0000000000..0fbd4ab9fa
--- /dev/null
+++ b/ext/boto/beanstalk/exception.py
@@ -0,0 +1,63 @@
+import sys
+from boto.compat import json
+from boto.exception import BotoServerError
+
+
+def simple(e):
+    code = e.code
+
+    if code.endswith('Exception'):
+        code = code[:-len('Exception')]  # strip suffix; rstrip() takes a char set
+
+    try:
+        # Dynamically get the error class.
+        simple_e = getattr(sys.modules[__name__], code)(e)
+    except AttributeError:
+        # Return original exception on failure.
+        return e
+
+    return simple_e
+
+
+class SimpleException(BotoServerError):
+    def __init__(self, e):
+        super(SimpleException, self).__init__(e.status, e.reason, e.body)
+        self.error_message = self.message
+
+    def __repr__(self):
+        return self.__class__.__name__ + ': ' + self.error_message
+
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + self.error_message
+
+
+class ValidationError(SimpleException): pass
+
+# Common beanstalk exceptions.
+class IncompleteSignature(SimpleException): pass
+class InternalFailure(SimpleException): pass
+class InvalidAction(SimpleException): pass
+class InvalidClientTokenId(SimpleException): pass
+class InvalidParameterCombination(SimpleException): pass
+class InvalidParameterValue(SimpleException): pass
+class InvalidQueryParameter(SimpleException): pass
+class MalformedQueryString(SimpleException): pass
+class MissingAction(SimpleException): pass
+class MissingAuthenticationToken(SimpleException): pass
+class MissingParameter(SimpleException): pass
+class OptInRequired(SimpleException): pass
+class RequestExpired(SimpleException): pass
+class ServiceUnavailable(SimpleException): pass
+class Throttling(SimpleException): pass
+
+
+# Action specific exceptions.
+class TooManyApplications(SimpleException): pass
+class InsufficientPrivileges(SimpleException): pass
+class S3LocationNotInServiceRegion(SimpleException): pass
+class TooManyApplicationVersions(SimpleException): pass
+class TooManyConfigurationTemplates(SimpleException): pass
+class TooManyEnvironments(SimpleException): pass
+class S3SubscriptionRequired(SimpleException): pass
+class TooManyBuckets(SimpleException): pass
+class OperationInProgress(SimpleException): pass
+class SourceBundleDeletion(SimpleException): pass
diff --git a/ext/boto/beanstalk/layer1.py b/ext/boto/beanstalk/layer1.py
new file mode 100644
index 0000000000..e72ee23ec5
--- /dev/null
+++ b/ext/boto/beanstalk/layer1.py
@@ -0,0 +1,1201 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# + +import boto +import boto.jsonresponse +from boto.compat import json +from boto.regioninfo import RegionInfo +from boto.connection import AWSQueryConnection + + +class Layer1(AWSQueryConnection): + + APIVersion = '2010-12-01' + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'elasticbeanstalk.us-east-1.amazonaws.com' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(Layer1, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def _encode_bool(self, v): + v = bool(v) + return {True: "true", False: "false"}[v] + + def _get_response(self, action, params, path='/', verb='GET'): + params['ContentType'] = 'JSON' + response = self.make_request(action, params, path, verb) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + raise self.ResponseError(response.status, response.reason, body) + + def check_dns_availability(self, cname_prefix): + """Checks if the specified CNAME is available. + + :type cname_prefix: string + :param cname_prefix: The prefix used when this CNAME is + reserved. + """ + params = {'CNAMEPrefix': cname_prefix} + return self._get_response('CheckDNSAvailability', params) + + def create_application(self, application_name, description=None): + """ + Creates an application that has one configuration template + named default and no application versions. + + :type application_name: string + :param application_name: The name of the application. + Constraint: This name must be unique within your account. If the + specified name already exists, the action returns an + InvalidParameterValue error. + + :type description: string + :param description: Describes the application. + + :raises: TooManyApplicationsException + """ + params = {'ApplicationName': application_name} + if description: + params['Description'] = description + return self._get_response('CreateApplication', params) + + def create_application_version(self, application_name, version_label, + description=None, s3_bucket=None, + s3_key=None, auto_create_application=None): + """Creates an application version for the specified application. + + :type application_name: string + :param application_name: The name of the application. If no + application is found with this name, and AutoCreateApplication is + false, returns an InvalidParameterValue error. + + :type version_label: string + :param version_label: A label identifying this version. Constraint: + Must be unique per application. If an application version already + exists with this label for the specified application, AWS Elastic + Beanstalk returns an InvalidParameterValue error. + + :type description: string + :param description: Describes this version. + + :type s3_bucket: string + :param s3_bucket: The Amazon S3 bucket where the data is located. + + :type s3_key: string + :param s3_key: The Amazon S3 key where the data is located. 
Both + s3_bucket and s3_key must be specified in order to use a specific + source bundle. If both of these values are not specified the + sample application will be used. + + :type auto_create_application: boolean + :param auto_create_application: Determines how the system behaves if + the specified application for this version does not already exist: + true: Automatically creates the specified application for this + version if it does not already exist. false: Returns an + InvalidParameterValue if the specified application for this version + does not already exist. Default: false Valid Values: true | false + + :raises: TooManyApplicationsException, + TooManyApplicationVersionsException, + InsufficientPrivilegesException, + S3LocationNotInServiceRegionException + + """ + params = {'ApplicationName': application_name, + 'VersionLabel': version_label} + if description: + params['Description'] = description + if s3_bucket and s3_key: + params['SourceBundle.S3Bucket'] = s3_bucket + params['SourceBundle.S3Key'] = s3_key + if auto_create_application: + params['AutoCreateApplication'] = self._encode_bool( + auto_create_application) + return self._get_response('CreateApplicationVersion', params) + + def create_configuration_template(self, application_name, template_name, + solution_stack_name=None, + source_configuration_application_name=None, + source_configuration_template_name=None, + environment_id=None, description=None, + option_settings=None): + """Creates a configuration template. + + Templates are associated with a specific application and are used to + deploy different versions of the application with the same + configuration settings. + + :type application_name: string + :param application_name: The name of the application to associate with + this configuration template. If no application is found with this + name, AWS Elastic Beanstalk returns an InvalidParameterValue error. + + :type template_name: string + :param template_name: The name of the configuration template. + Constraint: This name must be unique per application. Default: If + a configuration template already exists with this name, AWS Elastic + Beanstalk returns an InvalidParameterValue error. + + :type solution_stack_name: string + :param solution_stack_name: The name of the solution stack used by this + configuration. The solution stack specifies the operating system, + architecture, and application server for a configuration template. + It determines the set of configuration options as well as the + possible and default values. Use ListAvailableSolutionStacks to + obtain a list of available solution stacks. Default: If the + SolutionStackName is not specified and the source configuration + parameter is blank, AWS Elastic Beanstalk uses the default solution + stack. If not specified and the source configuration parameter is + specified, AWS Elastic Beanstalk uses the same solution stack as + the source configuration template. + + :type source_configuration_application_name: string + :param source_configuration_application_name: The name of the + application associated with the configuration. + + :type source_configuration_template_name: string + :param source_configuration_template_name: The name of the + configuration template. + + :type environment_id: string + :param environment_id: The ID of the environment used with this + configuration template. + + :type description: string + :param description: Describes this configuration. 
+ + :type option_settings: list + :param option_settings: If specified, AWS Elastic Beanstalk sets the + specified configuration option to the requested value. The new + value overrides the value obtained from the solution stack or the + source configuration template. + + :raises: InsufficientPrivilegesException, + TooManyConfigurationTemplatesException + """ + params = {'ApplicationName': application_name, + 'TemplateName': template_name} + if solution_stack_name: + params['SolutionStackName'] = solution_stack_name + if source_configuration_application_name: + params['SourceConfiguration.ApplicationName'] = source_configuration_application_name + if source_configuration_template_name: + params['SourceConfiguration.TemplateName'] = source_configuration_template_name + if environment_id: + params['EnvironmentId'] = environment_id + if description: + params['Description'] = description + if option_settings: + self._build_list_params(params, option_settings, + 'OptionSettings.member', + ('Namespace', 'OptionName', 'Value')) + return self._get_response('CreateConfigurationTemplate', params) + + def create_environment(self, application_name, environment_name, + version_label=None, template_name=None, + solution_stack_name=None, cname_prefix=None, + description=None, option_settings=None, + options_to_remove=None, tier_name=None, + tier_type=None, tier_version='1.0'): + """Launches an environment for the application using a configuration. + + :type application_name: string + :param application_name: The name of the application that contains the + version to be deployed. If no application is found with this name, + CreateEnvironment returns an InvalidParameterValue error. + + :type environment_name: string + :param environment_name: A unique name for the deployment environment. + Used in the application URL. Constraint: Must be from 4 to 23 + characters in length. The name can contain only letters, numbers, + and hyphens. It cannot start or end with a hyphen. This name must + be unique in your account. If the specified name already exists, + AWS Elastic Beanstalk returns an InvalidParameterValue error. + Default: If the CNAME parameter is not specified, the environment + name becomes part of the CNAME, and therefore part of the visible + URL for your application. + + :type version_label: string + :param version_label: The name of the application version to deploy. If + the specified application has no associated application versions, + AWS Elastic Beanstalk UpdateEnvironment returns an + InvalidParameterValue error. Default: If not specified, AWS + Elastic Beanstalk attempts to launch the most recently created + application version. + + :type template_name: string + :param template_name: The name of the configuration template to + use in deployment. If no configuration template is found with this + name, AWS Elastic Beanstalk returns an InvalidParameterValue error. + Condition: You must specify either this parameter or a + SolutionStackName, but not both. If you specify both, AWS Elastic + Beanstalk returns an InvalidParameterCombination error. If you do + not specify either, AWS Elastic Beanstalk returns a + MissingRequiredParameter error. + + :type solution_stack_name: string + :param solution_stack_name: This is an alternative to specifying a + configuration name. If specified, AWS Elastic Beanstalk sets the + configuration values to the default values associated with the + specified solution stack. Condition: You must specify either this + or a TemplateName, but not both. 
If you specify both, AWS Elastic
+            Beanstalk returns an InvalidParameterCombination error. If you do
+            not specify either, AWS Elastic Beanstalk returns a
+            MissingRequiredParameter error.
+
+        :type cname_prefix: string
+        :param cname_prefix: If specified, the environment attempts to use this
+            value as the prefix for the CNAME. If not specified, the
+            environment uses the environment name.
+
+        :type description: string
+        :param description: Describes this environment.
+
+        :type option_settings: list
+        :param option_settings: If specified, AWS Elastic Beanstalk sets the
+            specified configuration options to the requested value in the
+            configuration set for the new environment. These override the
+            values obtained from the solution stack or the configuration
+            template. Each element in the list is a tuple of (Namespace,
+            OptionName, Value), for example::
+
+            [('aws:autoscaling:launchconfiguration',
+                'Ec2KeyName', 'mykeypair')]
+
+        :type options_to_remove: list
+        :param options_to_remove: A list of custom user-defined configuration
+            options to remove from the configuration set for this new
+            environment.
+
+        :type tier_name: string
+        :param tier_name: The name of the tier. Valid values are
+            "WebServer" and "Worker". Defaults to "WebServer".
+            The ``tier_name`` and ``tier_type`` parameters are
+            related and the values provided must be valid.
+            The possible combinations are:
+
+            * "WebServer" and "Standard" (the default)
+            * "Worker" and "SQS/HTTP"
+
+        :type tier_type: string
+        :param tier_type: The type of the tier. Valid values are
+            "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
+            if ``tier_name`` is "Worker". Defaults to "Standard".
+
+        :type tier_version: string
+        :param tier_version: The version of the tier. Valid values
+            currently are "1.0". Defaults to "1.0".
+
+        :raises: TooManyEnvironmentsException, InsufficientPrivilegesException
+
+        """
+        params = {'ApplicationName': application_name,
+                  'EnvironmentName': environment_name}
+        if version_label:
+            params['VersionLabel'] = version_label
+        if template_name:
+            params['TemplateName'] = template_name
+        if solution_stack_name:
+            params['SolutionStackName'] = solution_stack_name
+        if cname_prefix:
+            params['CNAMEPrefix'] = cname_prefix
+        if description:
+            params['Description'] = description
+        if option_settings:
+            self._build_list_params(params, option_settings,
+                                    'OptionSettings.member',
+                                    ('Namespace', 'OptionName', 'Value'))
+        if options_to_remove:
+            self.build_list_params(params, options_to_remove,
+                                   'OptionsToRemove.member')
+        if tier_name and tier_type and tier_version:
+            params['Tier.Name'] = tier_name
+            params['Tier.Type'] = tier_type
+            params['Tier.Version'] = tier_version
+        return self._get_response('CreateEnvironment', params)
+
+    def create_storage_location(self):
+        """
+        Creates the Amazon S3 storage location for the account. This
+        location is used to store user log files.
+
+        :raises: TooManyBucketsException,
+                 S3SubscriptionRequiredException,
+                 InsufficientPrivilegesException
+
+        """
+        return self._get_response('CreateStorageLocation', params={})
+
+    def delete_application(self, application_name,
+                           terminate_env_by_force=None):
+        """
+        Deletes the specified application along with all associated
+        versions and configurations. The application versions will not
+        be deleted from your Amazon S3 bucket.
+
+        :type application_name: string
+        :param application_name: The name of the application to delete.
+ + :type terminate_env_by_force: boolean + :param terminate_env_by_force: When set to true, running + environments will be terminated before deleting the application. + + :raises: OperationInProgressException + + """ + params = {'ApplicationName': application_name} + if terminate_env_by_force: + params['TerminateEnvByForce'] = self._encode_bool( + terminate_env_by_force) + return self._get_response('DeleteApplication', params) + + def delete_application_version(self, application_name, version_label, + delete_source_bundle=None): + """Deletes the specified version from the specified application. + + :type application_name: string + :param application_name: The name of the application to delete + releases from. + + :type version_label: string + :param version_label: The label of the version to delete. + + :type delete_source_bundle: boolean + :param delete_source_bundle: Indicates whether to delete the + associated source bundle from Amazon S3. Valid Values: true | + false + + :raises: SourceBundleDeletionException, + InsufficientPrivilegesException, + OperationInProgressException, + S3LocationNotInServiceRegionException + """ + params = {'ApplicationName': application_name, + 'VersionLabel': version_label} + if delete_source_bundle: + params['DeleteSourceBundle'] = self._encode_bool( + delete_source_bundle) + return self._get_response('DeleteApplicationVersion', params) + + def delete_configuration_template(self, application_name, template_name): + """Deletes the specified configuration template. + + :type application_name: string + :param application_name: The name of the application to delete + the configuration template from. + + :type template_name: string + :param template_name: The name of the configuration template to + delete. + + :raises: OperationInProgressException + + """ + params = {'ApplicationName': application_name, + 'TemplateName': template_name} + return self._get_response('DeleteConfigurationTemplate', params) + + def delete_environment_configuration(self, application_name, + environment_name): + """ + Deletes the draft configuration associated with the running + environment. Updating a running environment with any + configuration changes creates a draft configuration set. You can + get the draft configuration using DescribeConfigurationSettings + while the update is in progress or if the update fails. The + DeploymentStatus for the draft configuration indicates whether + the deployment is in process or has failed. The draft + configuration remains in existence until it is deleted with this + action. + + :type application_name: string + :param application_name: The name of the application the + environment is associated with. + + :type environment_name: string + :param environment_name: The name of the environment to delete + the draft configuration from. + + """ + params = {'ApplicationName': application_name, + 'EnvironmentName': environment_name} + return self._get_response('DeleteEnvironmentConfiguration', params) + + def describe_application_versions(self, application_name=None, + version_labels=None): + """Returns descriptions for existing application versions. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to only include ones that are associated + with the specified application. + + :type version_labels: list + :param version_labels: If specified, restricts the returned + descriptions to only include ones that have the specified version + labels. 
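+
+        A minimal usage sketch (the region, application name, and version
+        labels are illustrative)::
+
+            import boto.beanstalk
+
+            conn = boto.beanstalk.connect_to_region('us-east-1')
+            result = conn.describe_application_versions(
+                application_name='my-app', version_labels=['v1', 'v2'])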
+
+        """
+        params = {}
+        if application_name:
+            params['ApplicationName'] = application_name
+        if version_labels:
+            self.build_list_params(params, version_labels,
+                                   'VersionLabels.member')
+        return self._get_response('DescribeApplicationVersions', params)
+
+    def describe_applications(self, application_names=None):
+        """Returns the descriptions of existing applications.
+
+        :type application_names: list
+        :param application_names: If specified, AWS Elastic Beanstalk restricts
+            the returned descriptions to only include those with the specified
+            names.
+
+        """
+        params = {}
+        if application_names:
+            self.build_list_params(params, application_names,
+                                   'ApplicationNames.member')
+        return self._get_response('DescribeApplications', params)
+
+    def describe_configuration_options(self, application_name=None,
+                                       template_name=None,
+                                       environment_name=None,
+                                       solution_stack_name=None, options=None):
+        """Describes configuration options used in a template or environment.
+
+        Describes the configuration options that are used in a
+        particular configuration template or environment, or that a
+        specified solution stack defines. The description includes the
+        values of the options, their default values, and an indication of
+        the required action on a running environment if an option value
+        is changed.
+
+        :type application_name: string
+        :param application_name: The name of the application associated with
+            the configuration template or environment. Only needed if you want
+            to describe the configuration options associated with either the
+            configuration template or environment.
+
+        :type template_name: string
+        :param template_name: The name of the configuration template whose
+            configuration options you want to describe.
+
+        :type environment_name: string
+        :param environment_name: The name of the environment whose
+            configuration options you want to describe.
+
+        :type solution_stack_name: string
+        :param solution_stack_name: The name of the solution stack whose
+            configuration options you want to describe.
+
+        :type options: list
+        :param options: If specified, restricts the descriptions to only
+            the specified options.
+        """
+        params = {}
+        if application_name:
+            params['ApplicationName'] = application_name
+        if template_name:
+            params['TemplateName'] = template_name
+        if environment_name:
+            params['EnvironmentName'] = environment_name
+        if solution_stack_name:
+            params['SolutionStackName'] = solution_stack_name
+        if options:
+            self.build_list_params(params, options, 'Options.member')
+        return self._get_response('DescribeConfigurationOptions', params)
+
+    def describe_configuration_settings(self, application_name,
+                                        template_name=None,
+                                        environment_name=None):
+        """
+        Returns a description of the settings for the specified
+        configuration set, that is, either a configuration template or
+        the configuration set associated with a running environment.
+        When describing the settings for the configuration set
+        associated with a running environment, it is possible to receive
+        two sets of setting descriptions. One is the deployed
+        configuration set, and the other is a draft configuration of an
+        environment that is either in the process of deployment or that
+        failed to deploy.
+
+        :type application_name: string
+        :param application_name: The application for the environment or
+            configuration template.
+
+        :type template_name: string
+        :param template_name: The name of the configuration template to
+            describe. Conditional: You must specify either this parameter or
+            an EnvironmentName, but not both.
If you specify both, AWS Elastic + Beanstalk returns an InvalidParameterCombination error. If you do + not specify either, AWS Elastic Beanstalk returns a + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to describe. + Condition: You must specify either this or a TemplateName, but not + both. If you specify both, AWS Elastic Beanstalk returns an + InvalidParameterCombination error. If you do not specify either, + AWS Elastic Beanstalk returns MissingRequiredParameter error. + """ + params = {'ApplicationName': application_name} + if template_name: + params['TemplateName'] = template_name + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('DescribeConfigurationSettings', params) + + def describe_environment_resources(self, environment_id=None, + environment_name=None): + """Returns AWS resources for this environment. + + :type environment_id: string + :param environment_id: The ID of the environment to retrieve AWS + resource usage data. Condition: You must specify either this or an + EnvironmentName, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to retrieve + AWS resource usage data. Condition: You must specify either this + or an EnvironmentId, or both. If you do not specify either, AWS + Elastic Beanstalk returns MissingRequiredParameter error. + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('DescribeEnvironmentResources', params) + + def describe_environments(self, application_name=None, version_label=None, + environment_ids=None, environment_names=None, + include_deleted=None, + included_deleted_back_to=None): + """Returns descriptions for existing environments. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that are associated + with this application. + + :type version_label: string + :param version_label: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to include only those that are associated + with this application version. + + :type environment_ids: list + :param environment_ids: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that have the + specified IDs. + + :type environment_names: list + :param environment_names: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those that have the + specified names. + + :type include_deleted: boolean + :param include_deleted: Indicates whether to include deleted + environments: true: Environments that have been deleted after + IncludedDeletedBackTo are displayed. false: Do not include deleted + environments. + + :type included_deleted_back_to: timestamp + :param included_deleted_back_to: If specified when IncludeDeleted is + set to true, then environments deleted after this date are + displayed. 
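+
+        A minimal usage sketch (the region, application, and environment
+        names are illustrative)::
+
+            import boto.beanstalk
+
+            conn = boto.beanstalk.connect_to_region('us-east-1')
+            result = conn.describe_environments(
+                application_name='my-app', environment_names=['my-env'])
+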
+ """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if version_label: + params['VersionLabel'] = version_label + if environment_ids: + self.build_list_params(params, environment_ids, + 'EnvironmentIds.member') + if environment_names: + self.build_list_params(params, environment_names, + 'EnvironmentNames.member') + if include_deleted: + params['IncludeDeleted'] = self._encode_bool(include_deleted) + if included_deleted_back_to: + params['IncludedDeletedBackTo'] = included_deleted_back_to + return self._get_response('DescribeEnvironments', params) + + def describe_events(self, application_name=None, version_label=None, + template_name=None, environment_id=None, + environment_name=None, request_id=None, severity=None, + start_time=None, end_time=None, max_records=None, + next_token=None): + """Returns event descriptions matching criteria up to the last 6 weeks. + + :type application_name: string + :param application_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to include only those associated with + this application. + + :type version_label: string + :param version_label: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those associated with this application + version. + + :type template_name: string + :param template_name: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that are associated with this + environment configuration. + + :type environment_id: string + :param environment_id: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to those associated with this + environment. + + :type environment_name: string + :param environment_name: If specified, AWS Elastic Beanstalk restricts + the returned descriptions to those associated with this + environment. + + :type request_id: string + :param request_id: If specified, AWS Elastic Beanstalk restricts the + described events to include only those associated with this request + ID. + + :type severity: string + :param severity: If specified, limits the events returned from this + call to include only those with the specified severity or higher. + + :type start_time: timestamp + :param start_time: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that occur on or after this time. + + :type end_time: timestamp + :param end_time: If specified, AWS Elastic Beanstalk restricts the + returned descriptions to those that occur up to, but not including, + the EndTime. + + :type max_records: integer + :param max_records: Specifies the maximum number of events that can be + returned, beginning with the most recent event. + + :type next_token: string + :param next_token: Pagination token. If specified, the events return + the next batch of results. 
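+
+        A minimal usage sketch (the region, names, and severity value are
+        illustrative)::
+
+            import boto.beanstalk
+
+            conn = boto.beanstalk.connect_to_region('us-east-1')
+            events = conn.describe_events(
+                application_name='my-app', environment_name='my-env',
+                severity='ERROR')
+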
+ """ + params = {} + if application_name: + params['ApplicationName'] = application_name + if version_label: + params['VersionLabel'] = version_label + if template_name: + params['TemplateName'] = template_name + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + if request_id: + params['RequestId'] = request_id + if severity: + params['Severity'] = severity + if start_time: + params['StartTime'] = start_time + if end_time: + params['EndTime'] = end_time + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self._get_response('DescribeEvents', params) + + def list_available_solution_stacks(self): + """Returns a list of the available solution stack names.""" + return self._get_response('ListAvailableSolutionStacks', params={}) + + def rebuild_environment(self, environment_id=None, environment_name=None): + """ + Deletes and recreates all of the AWS resources (for example: + the Auto Scaling group, load balancer, etc.) for a specified + environment and forces a restart. + + :type environment_id: string + :param environment_id: The ID of the environment to rebuild. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to rebuild. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :raises: InsufficientPrivilegesException + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RebuildEnvironment', params) + + def request_environment_info(self, info_type='tail', environment_id=None, + environment_name=None): + """ + Initiates a request to compile the specified type of + information of the deployed environment. Setting the InfoType + to tail compiles the last lines from the application server log + files of every Amazon EC2 instance in your environment. Use + RetrieveEnvironmentInfo to access the compiled information. + + :type info_type: string + :param info_type: The type of information to request. + + :type environment_id: string + :param environment_id: The ID of the environment of the + requested data. If no such environment is found, + RequestEnvironmentInfo returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment of the + requested data. If no such environment is found, + RequestEnvironmentInfo returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. 
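+
+        A minimal usage sketch (the region and environment name are
+        illustrative); the compiled output is fetched later with
+        RetrieveEnvironmentInfo::
+
+            import boto.beanstalk
+
+            conn = boto.beanstalk.connect_to_region('us-east-1')
+            conn.request_environment_info(environment_name='my-env')
+            # Later, once AWS has compiled the requested information:
+            info = conn.retrieve_environment_info(environment_name='my-env')
+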
+ """ + params = {'InfoType': info_type} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RequestEnvironmentInfo', params) + + def restart_app_server(self, environment_id=None, environment_name=None): + """ + Causes the environment to restart the application container + server running on each Amazon EC2 instance. + + :type environment_id: string + :param environment_id: The ID of the environment to restart the server + for. Condition: You must specify either this or an + EnvironmentName, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the environment to restart the + server for. Condition: You must specify either this or an + EnvironmentId, or both. If you do not specify either, AWS Elastic + Beanstalk returns MissingRequiredParameter error. + """ + params = {} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RestartAppServer', params) + + def retrieve_environment_info(self, info_type='tail', environment_id=None, + environment_name=None): + """ + Retrieves the compiled information from a RequestEnvironmentInfo + request. + + :type info_type: string + :param info_type: The type of information to retrieve. + + :type environment_id: string + :param environment_id: The ID of the data's environment. If no such + environment is found, returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentName, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + + :type environment_name: string + :param environment_name: The name of the data's environment. If no such + environment is found, returns an InvalidParameterValue error. + Condition: You must specify either this or an EnvironmentId, or + both. If you do not specify either, AWS Elastic Beanstalk returns + MissingRequiredParameter error. + """ + params = {'InfoType': info_type} + if environment_id: + params['EnvironmentId'] = environment_id + if environment_name: + params['EnvironmentName'] = environment_name + return self._get_response('RetrieveEnvironmentInfo', params) + + def swap_environment_cnames(self, source_environment_id=None, + source_environment_name=None, + destination_environment_id=None, + destination_environment_name=None): + """Swaps the CNAMEs of two environments. + + :type source_environment_id: string + :param source_environment_id: The ID of the source environment. + Condition: You must specify at least the SourceEnvironmentID or the + SourceEnvironmentName. You may also specify both. If you specify + the SourceEnvironmentId, you must specify the + DestinationEnvironmentId. + + :type source_environment_name: string + :param source_environment_name: The name of the source environment. + Condition: You must specify at least the SourceEnvironmentID or the + SourceEnvironmentName. You may also specify both. If you specify + the SourceEnvironmentName, you must specify the + DestinationEnvironmentName. + + :type destination_environment_id: string + :param destination_environment_id: The ID of the destination + environment. Condition: You must specify at least the + DestinationEnvironmentID or the DestinationEnvironmentName. You may + also specify both. 
You must specify the SourceEnvironmentId with
+            the DestinationEnvironmentId.
+
+        :type destination_environment_name: string
+        :param destination_environment_name: The name of the destination
+            environment. Condition: You must specify at least the
+            DestinationEnvironmentID or the DestinationEnvironmentName. You may
+            also specify both. You must specify the SourceEnvironmentName with
+            the DestinationEnvironmentName.
+        """
+        params = {}
+        if source_environment_id:
+            params['SourceEnvironmentId'] = source_environment_id
+        if source_environment_name:
+            params['SourceEnvironmentName'] = source_environment_name
+        if destination_environment_id:
+            params['DestinationEnvironmentId'] = destination_environment_id
+        if destination_environment_name:
+            params['DestinationEnvironmentName'] = destination_environment_name
+        return self._get_response('SwapEnvironmentCNAMEs', params)
+
+    def terminate_environment(self, environment_id=None, environment_name=None,
+                              terminate_resources=None):
+        """Terminates the specified environment.
+
+        :type environment_id: string
+        :param environment_id: The ID of the environment to terminate.
+            Condition: You must specify either this or an EnvironmentName, or
+            both. If you do not specify either, AWS Elastic Beanstalk returns
+            MissingRequiredParameter error.
+
+        :type environment_name: string
+        :param environment_name: The name of the environment to terminate.
+            Condition: You must specify either this or an EnvironmentId, or
+            both. If you do not specify either, AWS Elastic Beanstalk returns
+            MissingRequiredParameter error.
+
+        :type terminate_resources: boolean
+        :param terminate_resources: Indicates whether the associated AWS
+            resources should shut down when the environment is terminated:
+            true: (default) The user AWS resources (for example, the Auto
+            Scaling group, LoadBalancer, etc.) are terminated along with the
+            environment. false: The environment is removed from AWS
+            Elastic Beanstalk but the AWS resources continue to operate. For
+            more information, see the AWS Elastic Beanstalk User Guide.
+            Default: true Valid Values: true | false
+
+        :raises: InsufficientPrivilegesException
+        """
+        params = {}
+        if environment_id:
+            params['EnvironmentId'] = environment_id
+        if environment_name:
+            params['EnvironmentName'] = environment_name
+        if terminate_resources:
+            params['TerminateResources'] = self._encode_bool(
+                terminate_resources)
+        return self._get_response('TerminateEnvironment', params)
+
+    def update_application(self, application_name, description=None):
+        """
+        Updates the specified application to have the specified
+        properties.
+
+        :type application_name: string
+        :param application_name: The name of the application to update.
+            If no such application is found, UpdateApplication returns an
+            InvalidParameterValue error.
+
+        :type description: string
+        :param description: A new description for the application. Default: If
+            not specified, AWS Elastic Beanstalk does not update the
+            description.
+        """
+        params = {'ApplicationName': application_name}
+        if description:
+            params['Description'] = description
+        return self._get_response('UpdateApplication', params)
+
+    def update_application_version(self, application_name, version_label,
+                                   description=None):
+        """Updates the application version to have the specified properties.
+
+        :type application_name: string
+        :param application_name: The name of the application associated with
+            this version. If no application is found with this name,
+            UpdateApplication returns an InvalidParameterValue error.
+
+        :type version_label: string
+        :param version_label: The name of the version to update. If no
+            application version is found with this label,
+            UpdateApplicationVersion returns an InvalidParameterValue error.
+
+        :type description: string
+        :param description: A new description for this release.
+        """
+        params = {'ApplicationName': application_name,
+                  'VersionLabel': version_label}
+        if description:
+            params['Description'] = description
+        return self._get_response('UpdateApplicationVersion', params)
+
+    def update_configuration_template(self, application_name, template_name,
+                                      description=None, option_settings=None,
+                                      options_to_remove=None):
+        """
+        Updates the specified configuration template to have the
+        specified properties or configuration option values.
+
+        :type application_name: string
+        :param application_name: The name of the application associated with
+            the configuration template to update. If no application is found
+            with this name, UpdateConfigurationTemplate returns an
+            InvalidParameterValue error.
+
+        :type template_name: string
+        :param template_name: The name of the configuration template to update.
+            If no configuration template is found with this name,
+            UpdateConfigurationTemplate returns an InvalidParameterValue error.
+
+        :type description: string
+        :param description: A new description for the configuration.
+
+        :type option_settings: list
+        :param option_settings: A list of configuration option settings to
+            update with the new specified option value.
+
+        :type options_to_remove: list
+        :param options_to_remove: A list of configuration options to remove
+            from the configuration set. Constraint: You can remove only
+            UserDefined configuration options.
+
+        :raises: InsufficientPrivilegesException
+        """
+        params = {'ApplicationName': application_name,
+                  'TemplateName': template_name}
+        if description:
+            params['Description'] = description
+        if option_settings:
+            self._build_list_params(params, option_settings,
+                                    'OptionSettings.member',
+                                    ('Namespace', 'OptionName', 'Value'))
+        if options_to_remove:
+            self.build_list_params(params, options_to_remove,
+                                   'OptionsToRemove.member')
+        return self._get_response('UpdateConfigurationTemplate', params)
+
+    def update_environment(self, environment_id=None, environment_name=None,
+                           version_label=None, template_name=None,
+                           description=None, option_settings=None,
+                           options_to_remove=None, tier_name=None,
+                           tier_type=None, tier_version='1.0'):
+        """
+        Updates the environment description, deploys a new application
+        version, updates the configuration settings to an entirely new
+        configuration template, or updates select configuration option
+        values in the running environment. Attempting to update both
+        the release and configuration is not allowed and AWS Elastic
+        Beanstalk returns an InvalidParameterCombination error. When
+        updating the configuration settings to a new template or
+        individual settings, a draft configuration is created and
+        DescribeConfigurationSettings for this environment returns two
+        setting descriptions with different DeploymentStatus values.
+
+        :type environment_id: string
+        :param environment_id: The ID of the environment to update. If no
+            environment with this ID exists, AWS Elastic Beanstalk returns an
+            InvalidParameterValue error. Condition: You must specify either
+            this or an EnvironmentName, or both. If you do not specify either,
+            AWS Elastic Beanstalk returns MissingRequiredParameter error.
+
+        :type environment_name: string
+        :param environment_name: The name of the environment to update.
If no environment with this name exists, AWS Elastic Beanstalk returns an
+            InvalidParameterValue error. Condition: You must specify either
+            this or an EnvironmentId, or both. If you do not specify either,
+            AWS Elastic Beanstalk returns MissingRequiredParameter error.
+
+        :type version_label: string
+        :param version_label: If this parameter is specified, AWS Elastic
+            Beanstalk deploys the named application version to the environment.
+            If no such application version is found, returns an
+            InvalidParameterValue error.
+
+        :type template_name: string
+        :param template_name: If this parameter is specified, AWS Elastic
+            Beanstalk deploys this configuration template to the environment.
+            If no such configuration template is found, AWS Elastic Beanstalk
+            returns an InvalidParameterValue error.
+
+        :type description: string
+        :param description: If this parameter is specified, AWS Elastic
+            Beanstalk updates the description of this environment.
+
+        :type option_settings: list
+        :param option_settings: If specified, AWS Elastic Beanstalk updates the
+            configuration set associated with the running environment and sets
+            the specified configuration options to the requested value.
+
+        :type options_to_remove: list
+        :param options_to_remove: A list of custom user-defined configuration
+            options to remove from the configuration set for this environment.
+
+        :type tier_name: string
+        :param tier_name: The name of the tier. Valid values are
+            "WebServer" and "Worker". Defaults to "WebServer".
+            The ``tier_name`` and ``tier_type`` parameters are
+            related and the values provided must be valid.
+            The possible combinations are:
+
+              * "WebServer" and "Standard" (the default)
+              * "Worker" and "SQS/HTTP"
+
+        :type tier_type: string
+        :param tier_type: The type of the tier. Valid values are
+            "Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
+            if ``tier_name`` is "Worker". Defaults to "Standard".
+
+        :type tier_version: string
+        :param tier_version: The version of the tier. Valid values
+            currently are "1.0". Defaults to "1.0".
+
+        :raises: InsufficientPrivilegesException
+        """
+        params = {}
+        if environment_id:
+            params['EnvironmentId'] = environment_id
+        if environment_name:
+            params['EnvironmentName'] = environment_name
+        if version_label:
+            params['VersionLabel'] = version_label
+        if template_name:
+            params['TemplateName'] = template_name
+        if description:
+            params['Description'] = description
+        if option_settings:
+            self._build_list_params(params, option_settings,
+                                    'OptionSettings.member',
+                                    ('Namespace', 'OptionName', 'Value'))
+        if options_to_remove:
+            self.build_list_params(params, options_to_remove,
+                                   'OptionsToRemove.member')
+        if tier_name and tier_type and tier_version:
+            params['Tier.Name'] = tier_name
+            params['Tier.Type'] = tier_type
+            params['Tier.Version'] = tier_version
+        return self._get_response('UpdateEnvironment', params)
+
+    def validate_configuration_settings(self, application_name,
+                                        option_settings, template_name=None,
+                                        environment_name=None):
+        """
+        Takes a set of configuration settings and either a
+        configuration template or environment, and determines whether
+        those values are valid. This action returns a list of messages
+        indicating any errors or warnings associated with the selection
+        of option values.
+
+        :type application_name: string
+        :param application_name: The name of the application that the
+            configuration template or environment belongs to.
+
+        :type template_name: string
+        :param template_name: The name of the configuration template to
+            validate the settings against. Condition: You cannot specify both
+            this and an environment name.
+
+        :type environment_name: string
+        :param environment_name: The name of the environment to validate the
+            settings against. Condition: You cannot specify both this and a
+            configuration template name.
+
+        :type option_settings: list
+        :param option_settings: A list of the options and desired values to
+            evaluate.
+
+        :raises: InsufficientPrivilegesException
+        """
+        params = {'ApplicationName': application_name}
+        self._build_list_params(params, option_settings,
+                                'OptionSettings.member',
+                                ('Namespace', 'OptionName', 'Value'))
+        if template_name:
+            params['TemplateName'] = template_name
+        if environment_name:
+            params['EnvironmentName'] = environment_name
+        return self._get_response('ValidateConfigurationSettings', params)
+
+    def _build_list_params(self, params, user_values, prefix, tuple_names):
+        # For params such as the ConfigurationOptionSettings,
+        # they can specify a list of tuples where each tuple maps to a specific
+        # arg. For example:
+        # user_values = [('foo', 'bar', 'baz')]
+        # prefix=MyOption.member
+        # tuple_names=('One', 'Two', 'Three')
+        # would result in:
+        # MyOption.member.1.One = foo
+        # MyOption.member.1.Two = bar
+        # MyOption.member.1.Three = baz
+        for i, user_value in enumerate(user_values, 1):
+            current_prefix = '%s.%s' % (prefix, i)
+            for key, value in zip(tuple_names, user_value):
+                full_key = '%s.%s' % (current_prefix, key)
+                params[full_key] = value
diff --git a/ext/boto/beanstalk/response.py b/ext/boto/beanstalk/response.py
new file mode 100644
index 0000000000..8128ba1fed
--- /dev/null
+++ b/ext/boto/beanstalk/response.py
@@ -0,0 +1,704 @@
+"""Classify responses from layer1 and strictly type their values."""
+from datetime import datetime
+from boto.compat import six
+
+
+class BaseObject(object):
+
+    def __repr__(self):
+        result = self.__class__.__name__ + '{ '
+        counter = 0
+        for key, value in six.iteritems(self.__dict__):
+            # no comma before the first key
+            counter += 1
+            if counter > 1:
+                result += ', '
+            result += key + ': '
+            result += self._repr_by_type(value)
+        result += ' }'
+        return result
+
+    def _repr_by_type(self, value):
+        # Everything is either a 'Response', 'list', or 'None/str/int/bool'.
+        result = ''
+        if isinstance(value, Response):
+            result += value.__repr__()
+        elif isinstance(value, list):
+            result += self._repr_list(value)
+        else:
+            result += str(value)
+        return result
+
+    def _repr_list(self, array):
+        result = '['
+        for value in array:
+            result += ' ' + self._repr_by_type(value) + ','
+        # Check for trailing comma with a space.
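+        # e.g. ['a', 'b'] renders as '[ a, b ]', while an empty list skips
+        # the fix-up below and renders as '[]'.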
+ if len(result) > 1: + result = result[:-1] + ' ' + result += ']' + return result + + +class Response(BaseObject): + def __init__(self, response): + super(Response, self).__init__() + + if response['ResponseMetadata']: + self.response_metadata = ResponseMetadata(response['ResponseMetadata']) + else: + self.response_metadata = None + + +class ResponseMetadata(BaseObject): + def __init__(self, response): + super(ResponseMetadata, self).__init__() + + self.request_id = str(response['RequestId']) + + +class ApplicationDescription(BaseObject): + def __init__(self, response): + super(ApplicationDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.configuration_templates = [] + if response['ConfigurationTemplates']: + for member in response['ConfigurationTemplates']: + configuration_template = str(member) + self.configuration_templates.append(configuration_template) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.versions = [] + if response['Versions']: + for member in response['Versions']: + version = str(member) + self.versions.append(version) + + +class ApplicationVersionDescription(BaseObject): + def __init__(self, response): + super(ApplicationVersionDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + if response['SourceBundle']: + self.source_bundle = S3Location(response['SourceBundle']) + else: + self.source_bundle = None + self.version_label = str(response['VersionLabel']) + + +class AutoScalingGroup(BaseObject): + def __init__(self, response): + super(AutoScalingGroup, self).__init__() + + self.name = str(response['Name']) + + +class ConfigurationOptionDescription(BaseObject): + def __init__(self, response): + super(ConfigurationOptionDescription, self).__init__() + + self.change_severity = str(response['ChangeSeverity']) + self.default_value = str(response['DefaultValue']) + self.max_length = int(response['MaxLength']) if response['MaxLength'] else None + self.max_value = int(response['MaxValue']) if response['MaxValue'] else None + self.min_value = int(response['MinValue']) if response['MinValue'] else None + self.name = str(response['Name']) + self.namespace = str(response['Namespace']) + if response['Regex']: + self.regex = OptionRestrictionRegex(response['Regex']) + else: + self.regex = None + self.user_defined = str(response['UserDefined']) + self.value_options = [] + if response['ValueOptions']: + for member in response['ValueOptions']: + value_option = str(member) + self.value_options.append(value_option) + self.value_type = str(response['ValueType']) + + +class ConfigurationOptionSetting(BaseObject): + def __init__(self, response): + super(ConfigurationOptionSetting, self).__init__() + + self.namespace = str(response['Namespace']) + self.option_name = str(response['OptionName']) + self.value = str(response['Value']) + + +class ConfigurationSettingsDescription(BaseObject): + def __init__(self, response): + super(ConfigurationSettingsDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + 
self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class EnvironmentDescription(BaseObject): + def __init__(self, response): + super(EnvironmentDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class EnvironmentInfoDescription(BaseObject): + def __init__(self, response): + super(EnvironmentInfoDescription, self).__init__() + + self.ec2_instance_id = str(response['Ec2InstanceId']) + self.info_type = str(response['InfoType']) + self.message = str(response['Message']) + self.sample_timestamp = datetime.fromtimestamp(response['SampleTimestamp']) + + +class EnvironmentResourceDescription(BaseObject): + def __init__(self, response): + super(EnvironmentResourceDescription, self).__init__() + + self.auto_scaling_groups = [] + if response['AutoScalingGroups']: + for member in response['AutoScalingGroups']: + auto_scaling_group = AutoScalingGroup(member) + self.auto_scaling_groups.append(auto_scaling_group) + self.environment_name = str(response['EnvironmentName']) + self.instances = [] + if response['Instances']: + for member in response['Instances']: + instance = Instance(member) + self.instances.append(instance) + self.launch_configurations = [] + if response['LaunchConfigurations']: + for member in response['LaunchConfigurations']: + launch_configuration = LaunchConfiguration(member) + self.launch_configurations.append(launch_configuration) + self.load_balancers = [] + if response['LoadBalancers']: + for member in response['LoadBalancers']: + load_balancer = LoadBalancer(member) + self.load_balancers.append(load_balancer) + self.triggers = [] + if response['Triggers']: + for member in response['Triggers']: + trigger = Trigger(member) + self.triggers.append(trigger) + + +class EnvironmentResourcesDescription(BaseObject): + def __init__(self, response): + super(EnvironmentResourcesDescription, self).__init__() + + if response['LoadBalancer']: + self.load_balancer = LoadBalancerDescription(response['LoadBalancer']) + else: + self.load_balancer = None + + +class EventDescription(BaseObject): + def __init__(self, response): + super(EventDescription, self).__init__() + + self.application_name = str(response['ApplicationName']) + self.environment_name = str(response['EnvironmentName']) + self.event_date = datetime.fromtimestamp(response['EventDate']) + 
self.message = str(response['Message'])
+        self.request_id = str(response['RequestId'])
+        self.severity = str(response['Severity'])
+        self.template_name = str(response['TemplateName'])
+        self.version_label = str(response['VersionLabel'])
+
+
+class Instance(BaseObject):
+    def __init__(self, response):
+        super(Instance, self).__init__()
+
+        self.id = str(response['Id'])
+
+
+class LaunchConfiguration(BaseObject):
+    def __init__(self, response):
+        super(LaunchConfiguration, self).__init__()
+
+        self.name = str(response['Name'])
+
+
+class Listener(BaseObject):
+    def __init__(self, response):
+        super(Listener, self).__init__()
+
+        self.port = int(response['Port']) if response['Port'] else None
+        self.protocol = str(response['Protocol'])
+
+
+class LoadBalancer(BaseObject):
+    def __init__(self, response):
+        super(LoadBalancer, self).__init__()
+
+        self.name = str(response['Name'])
+
+
+class LoadBalancerDescription(BaseObject):
+    def __init__(self, response):
+        super(LoadBalancerDescription, self).__init__()
+
+        self.domain = str(response['Domain'])
+        self.listeners = []
+        if response['Listeners']:
+            for member in response['Listeners']:
+                listener = Listener(member)
+                self.listeners.append(listener)
+        self.load_balancer_name = str(response['LoadBalancerName'])
+
+
+class OptionRestrictionRegex(BaseObject):
+    def __init__(self, response):
+        super(OptionRestrictionRegex, self).__init__()
+
+        self.label = response['Label']
+        self.pattern = response['Pattern']
+
+
+class SolutionStackDescription(BaseObject):
+    def __init__(self, response):
+        super(SolutionStackDescription, self).__init__()
+
+        self.permitted_file_types = []
+        if response['PermittedFileTypes']:
+            for member in response['PermittedFileTypes']:
+                permitted_file_type = str(member)
+                self.permitted_file_types.append(permitted_file_type)
+        self.solution_stack_name = str(response['SolutionStackName'])
+
+
+class S3Location(BaseObject):
+    def __init__(self, response):
+        super(S3Location, self).__init__()
+
+        self.s3_bucket = str(response['S3Bucket'])
+        self.s3_key = str(response['S3Key'])
+
+
+class Trigger(BaseObject):
+    def __init__(self, response):
+        super(Trigger, self).__init__()
+
+        self.name = str(response['Name'])
+
+
+class ValidationMessage(BaseObject):
+    def __init__(self, response):
+        super(ValidationMessage, self).__init__()
+
+        self.message = str(response['Message'])
+        self.namespace = str(response['Namespace'])
+        self.option_name = str(response['OptionName'])
+        self.severity = str(response['Severity'])
+
+
+# These are the response objects layer2 uses, one for each layer1 api call.
+class CheckDNSAvailabilityResponse(Response):
+    def __init__(self, response):
+        response = response['CheckDNSAvailabilityResponse']
+        super(CheckDNSAvailabilityResponse, self).__init__(response)
+
+        response = response['CheckDNSAvailabilityResult']
+        self.fully_qualified_cname = str(response['FullyQualifiedCNAME'])
+        self.available = bool(response['Available'])
+
+
+# Our naming convention produces this class name but the api names it with
+# more capitals.
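+# For example, the wrapper derives 'CheckDnsAvailabilityResponse' from the
+# method name 'check_dns_availability' by capitalizing each part, while the
+# service element is 'CheckDNSAvailabilityResponse'; the alias below keeps
+# both spellings usable.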
+class CheckDnsAvailabilityResponse(CheckDNSAvailabilityResponse): pass + + +class CreateApplicationResponse(Response): + def __init__(self, response): + response = response['CreateApplicationResponse'] + super(CreateApplicationResponse, self).__init__(response) + + response = response['CreateApplicationResult'] + if response['Application']: + self.application = ApplicationDescription(response['Application']) + else: + self.application = None + + +class CreateApplicationVersionResponse(Response): + def __init__(self, response): + response = response['CreateApplicationVersionResponse'] + super(CreateApplicationVersionResponse, self).__init__(response) + + response = response['CreateApplicationVersionResult'] + if response['ApplicationVersion']: + self.application_version = ApplicationVersionDescription(response['ApplicationVersion']) + else: + self.application_version = None + + +class CreateConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['CreateConfigurationTemplateResponse'] + super(CreateConfigurationTemplateResponse, self).__init__(response) + + response = response['CreateConfigurationTemplateResult'] + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class CreateEnvironmentResponse(Response): + def __init__(self, response): + response = response['CreateEnvironmentResponse'] + super(CreateEnvironmentResponse, self).__init__(response) + + response = response['CreateEnvironmentResult'] + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class CreateStorageLocationResponse(Response): + def __init__(self, response): + response = response['CreateStorageLocationResponse'] + super(CreateStorageLocationResponse, self).__init__(response) + + response = response['CreateStorageLocationResult'] + self.s3_bucket = str(response['S3Bucket']) + + +class DeleteApplicationResponse(Response): + def __init__(self, response): + response = response['DeleteApplicationResponse'] + super(DeleteApplicationResponse, self).__init__(response) + + +class DeleteApplicationVersionResponse(Response): + def __init__(self, response): + response = 
response['DeleteApplicationVersionResponse'] + super(DeleteApplicationVersionResponse, self).__init__(response) + + +class DeleteConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['DeleteConfigurationTemplateResponse'] + super(DeleteConfigurationTemplateResponse, self).__init__(response) + + +class DeleteEnvironmentConfigurationResponse(Response): + def __init__(self, response): + response = response['DeleteEnvironmentConfigurationResponse'] + super(DeleteEnvironmentConfigurationResponse, self).__init__(response) + + +class DescribeApplicationVersionsResponse(Response): + def __init__(self, response): + response = response['DescribeApplicationVersionsResponse'] + super(DescribeApplicationVersionsResponse, self).__init__(response) + + response = response['DescribeApplicationVersionsResult'] + self.application_versions = [] + if response['ApplicationVersions']: + for member in response['ApplicationVersions']: + application_version = ApplicationVersionDescription(member) + self.application_versions.append(application_version) + + +class DescribeApplicationsResponse(Response): + def __init__(self, response): + response = response['DescribeApplicationsResponse'] + super(DescribeApplicationsResponse, self).__init__(response) + + response = response['DescribeApplicationsResult'] + self.applications = [] + if response['Applications']: + for member in response['Applications']: + application = ApplicationDescription(member) + self.applications.append(application) + + +class DescribeConfigurationOptionsResponse(Response): + def __init__(self, response): + response = response['DescribeConfigurationOptionsResponse'] + super(DescribeConfigurationOptionsResponse, self).__init__(response) + + response = response['DescribeConfigurationOptionsResult'] + self.options = [] + if response['Options']: + for member in response['Options']: + option = ConfigurationOptionDescription(member) + self.options.append(option) + self.solution_stack_name = str(response['SolutionStackName']) + + +class DescribeConfigurationSettingsResponse(Response): + def __init__(self, response): + response = response['DescribeConfigurationSettingsResponse'] + super(DescribeConfigurationSettingsResponse, self).__init__(response) + + response = response['DescribeConfigurationSettingsResult'] + self.configuration_settings = [] + if response['ConfigurationSettings']: + for member in response['ConfigurationSettings']: + configuration_setting = ConfigurationSettingsDescription(member) + self.configuration_settings.append(configuration_setting) + + +class DescribeEnvironmentResourcesResponse(Response): + def __init__(self, response): + response = response['DescribeEnvironmentResourcesResponse'] + super(DescribeEnvironmentResourcesResponse, self).__init__(response) + + response = response['DescribeEnvironmentResourcesResult'] + if response['EnvironmentResources']: + self.environment_resources = EnvironmentResourceDescription(response['EnvironmentResources']) + else: + self.environment_resources = None + + +class DescribeEnvironmentsResponse(Response): + def __init__(self, response): + response = response['DescribeEnvironmentsResponse'] + super(DescribeEnvironmentsResponse, self).__init__(response) + + response = response['DescribeEnvironmentsResult'] + self.environments = [] + if response['Environments']: + for member in response['Environments']: + environment = EnvironmentDescription(member) + self.environments.append(environment) + + +class DescribeEventsResponse(Response): + def __init__(self, 
response):
+        response = response['DescribeEventsResponse']
+        super(DescribeEventsResponse, self).__init__(response)
+
+        response = response['DescribeEventsResult']
+        self.events = []
+        if response['Events']:
+            for member in response['Events']:
+                event = EventDescription(member)
+                self.events.append(event)
+        self.next_token = str(response['NextToken'])
+
+
+class ListAvailableSolutionStacksResponse(Response):
+    def __init__(self, response):
+        response = response['ListAvailableSolutionStacksResponse']
+        super(ListAvailableSolutionStacksResponse, self).__init__(response)
+
+        response = response['ListAvailableSolutionStacksResult']
+        self.solution_stack_details = []
+        if response['SolutionStackDetails']:
+            for member in response['SolutionStackDetails']:
+                solution_stack_detail = SolutionStackDescription(member)
+                self.solution_stack_details.append(solution_stack_detail)
+        self.solution_stacks = []
+        if response['SolutionStacks']:
+            for member in response['SolutionStacks']:
+                solution_stack = str(member)
+                self.solution_stacks.append(solution_stack)
+
+
+class RebuildEnvironmentResponse(Response):
+    def __init__(self, response):
+        response = response['RebuildEnvironmentResponse']
+        super(RebuildEnvironmentResponse, self).__init__(response)
+
+
+class RequestEnvironmentInfoResponse(Response):
+    def __init__(self, response):
+        response = response['RequestEnvironmentInfoResponse']
+        super(RequestEnvironmentInfoResponse, self).__init__(response)
+
+
+class RestartAppServerResponse(Response):
+    def __init__(self, response):
+        response = response['RestartAppServerResponse']
+        super(RestartAppServerResponse, self).__init__(response)
+
+
+class RetrieveEnvironmentInfoResponse(Response):
+    def __init__(self, response):
+        response = response['RetrieveEnvironmentInfoResponse']
+        super(RetrieveEnvironmentInfoResponse, self).__init__(response)
+
+        response = response['RetrieveEnvironmentInfoResult']
+        self.environment_info = []
+        if response['EnvironmentInfo']:
+            for member in response['EnvironmentInfo']:
+                environment_info = EnvironmentInfoDescription(member)
+                self.environment_info.append(environment_info)
+
+
+class SwapEnvironmentCNAMEsResponse(Response):
+    def __init__(self, response):
+        response = response['SwapEnvironmentCNAMEsResponse']
+        super(SwapEnvironmentCNAMEsResponse, self).__init__(response)
+
+
+class SwapEnvironmentCnamesResponse(SwapEnvironmentCNAMEsResponse): pass
+
+
+class TerminateEnvironmentResponse(Response):
+    def __init__(self, response):
+        response = response['TerminateEnvironmentResponse']
+        super(TerminateEnvironmentResponse, self).__init__(response)
+
+        response = response['TerminateEnvironmentResult']
+        self.application_name = str(response['ApplicationName'])
+        self.cname = str(response['CNAME'])
+        self.date_created = datetime.fromtimestamp(response['DateCreated'])
+        self.date_updated = datetime.fromtimestamp(response['DateUpdated'])
+        self.description = str(response['Description'])
+        self.endpoint_url = str(response['EndpointURL'])
+        self.environment_id = str(response['EnvironmentId'])
+        self.environment_name = str(response['EnvironmentName'])
+        self.health = str(response['Health'])
+        if response['Resources']:
+            self.resources = EnvironmentResourcesDescription(response['Resources'])
+        else:
+            self.resources = None
+        self.solution_stack_name = str(response['SolutionStackName'])
+        self.status = str(response['Status'])
+        self.template_name = str(response['TemplateName'])
+        self.version_label = str(response['VersionLabel'])
+
+
+class UpdateApplicationResponse(Response):
+    def 
__init__(self, response): + response = response['UpdateApplicationResponse'] + super(UpdateApplicationResponse, self).__init__(response) + + response = response['UpdateApplicationResult'] + if response['Application']: + self.application = ApplicationDescription(response['Application']) + else: + self.application = None + + +class UpdateApplicationVersionResponse(Response): + def __init__(self, response): + response = response['UpdateApplicationVersionResponse'] + super(UpdateApplicationVersionResponse, self).__init__(response) + + response = response['UpdateApplicationVersionResult'] + if response['ApplicationVersion']: + self.application_version = ApplicationVersionDescription(response['ApplicationVersion']) + else: + self.application_version = None + + +class UpdateConfigurationTemplateResponse(Response): + def __init__(self, response): + response = response['UpdateConfigurationTemplateResponse'] + super(UpdateConfigurationTemplateResponse, self).__init__(response) + + response = response['UpdateConfigurationTemplateResult'] + self.application_name = str(response['ApplicationName']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.deployment_status = str(response['DeploymentStatus']) + self.description = str(response['Description']) + self.environment_name = str(response['EnvironmentName']) + self.option_settings = [] + if response['OptionSettings']: + for member in response['OptionSettings']: + option_setting = ConfigurationOptionSetting(member) + self.option_settings.append(option_setting) + self.solution_stack_name = str(response['SolutionStackName']) + self.template_name = str(response['TemplateName']) + + +class UpdateEnvironmentResponse(Response): + def __init__(self, response): + response = response['UpdateEnvironmentResponse'] + super(UpdateEnvironmentResponse, self).__init__(response) + + response = response['UpdateEnvironmentResult'] + self.application_name = str(response['ApplicationName']) + self.cname = str(response['CNAME']) + self.date_created = datetime.fromtimestamp(response['DateCreated']) + self.date_updated = datetime.fromtimestamp(response['DateUpdated']) + self.description = str(response['Description']) + self.endpoint_url = str(response['EndpointURL']) + self.environment_id = str(response['EnvironmentId']) + self.environment_name = str(response['EnvironmentName']) + self.health = str(response['Health']) + if response['Resources']: + self.resources = EnvironmentResourcesDescription(response['Resources']) + else: + self.resources = None + self.solution_stack_name = str(response['SolutionStackName']) + self.status = str(response['Status']) + self.template_name = str(response['TemplateName']) + self.version_label = str(response['VersionLabel']) + + +class ValidateConfigurationSettingsResponse(Response): + def __init__(self, response): + response = response['ValidateConfigurationSettingsResponse'] + super(ValidateConfigurationSettingsResponse, self).__init__(response) + + response = response['ValidateConfigurationSettingsResult'] + self.messages = [] + if response['Messages']: + for member in response['Messages']: + message = ValidationMessage(member) + self.messages.append(message) diff --git a/ext/boto/beanstalk/wrapper.py b/ext/boto/beanstalk/wrapper.py new file mode 100644 index 0000000000..eea1124a9e --- /dev/null +++ b/ext/boto/beanstalk/wrapper.py @@ -0,0 +1,29 @@ +"""Wraps layer1 api methods and converts layer1 dict responses to objects.""" +from 
boto.beanstalk.layer1 import Layer1
+import boto.beanstalk.response
+from boto.exception import BotoServerError
+import boto.beanstalk.exception as exception
+
+
+def beanstalk_wrapper(func, name):
+    def _wrapped_low_level_api(*args, **kwargs):
+        try:
+            response = func(*args, **kwargs)
+        except BotoServerError as e:
+            raise exception.simple(e)
+        # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'.
+        cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response'
+        cls = getattr(boto.beanstalk.response, cls_name)
+        return cls(response)
+    return _wrapped_low_level_api
+
+
+class Layer1Wrapper(object):
+    def __init__(self, *args, **kwargs):
+        self.api = Layer1(*args, **kwargs)
+
+    def __getattr__(self, name):
+        # Wrap the layer1 method on the fly so its dict response is
+        # classified into the matching response object.
+        try:
+            return beanstalk_wrapper(getattr(self.api, name), name)
+        except AttributeError:
+            raise AttributeError("%s has no attribute %r" % (self, name))
diff --git a/ext/boto/cacerts/__init__.py b/ext/boto/cacerts/__init__.py
new file mode 100644
index 0000000000..1b2dec79e7
--- /dev/null
+++ b/ext/boto/cacerts/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2010 Google Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
diff --git a/ext/boto/cacerts/cacerts.txt b/ext/boto/cacerts/cacerts.txt
new file mode 100644
index 0000000000..514ab1bfad
--- /dev/null
+++ b/ext/boto/cacerts/cacerts.txt
@@ -0,0 +1,3837 @@
+##
+## boto/cacerts/cacerts.txt -- Bundle of CA Root Certificates
+##
+## Certificate data from Mozilla downloaded on: Wed Aug 20 03:12:04 2014
+##
+## This is a bundle of X.509 certificates of public Certificate Authorities
+## (CA). These were automatically extracted from Mozilla's root certificates
+## file (certdata.txt). This file can be found in the mozilla source tree:
+## http://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt
+##
+## It contains the certificates in PEM format and therefore
+## can be directly used with curl / libcurl / php_curl, or with
+## an Apache+mod_ssl webserver for SSL client authentication.
+## Just configure this file as the SSLCACertificateFile.
+##
+## Conversion done with mk-ca-bundle.pl version 1.22.
+## SHA1: bf2c15b3019e696660321d2227d942936dc50aa7 +## + + +GTE CyberTrust Global Root +========================== +-----BEGIN CERTIFICATE----- +MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUg +Q29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEG +A1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJvb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEz +MjM1OTAwWjB1MQswCQYDVQQGEwJVUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQL +Ex5HVEUgQ3liZXJUcnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0 +IEdsb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrHiM3dFw4u +sJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTSr41tiGeA5u2ylc9yMcql +HHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X404Wqk2kmhXBIgD8SFcd5tB8FLztimQID +AQABMA0GCSqGSIb3DQEBBAUAA4GBAG3rGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMW +M4ETCJ57NE7fQMh017l93PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OF +NMQkpw0PlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/ +-----END CERTIFICATE----- + +Thawte Server CA +================ +-----BEGIN CERTIFICATE----- +MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UE +AxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5j +b20wHhcNOTYwODAxMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNV +BAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29u +c3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcG +A1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0 +ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl +/Kj0R1HahbUgdJSGHg91yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg7 +1CcEJRCXL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGjEzAR +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG7oWDTSEwjsrZqG9J +GubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6eQNuozDJ0uW8NxuOzRAvZim+aKZuZ +GCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZqdq5snUb9kLy78fyGPmJvKP/iiMucEc= +-----END CERTIFICATE----- + +Thawte Premium Server CA +======================== +-----BEGIN CERTIFICATE----- +MIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkExFTATBgNVBAgT +DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3dGUgQ29uc3Vs +dGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UE +AxMYVGhhd3RlIFByZW1pdW0gU2VydmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZl +ckB0aGF3dGUuY29tMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYT +AlpBMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsGA1UEChMU +VGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRpb24gU2VydmljZXMgRGl2 +aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNlcnZlciBDQTEoMCYGCSqGSIb3DQEJARYZ +cHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2 +aovXwlue2oFBYo847kkEVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIh +Udib0GfQug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMRuHM/ +qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQQFAAOBgQAm +SCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUIhfzJATj/Tb7yFkJD57taRvvBxhEf +8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JMpAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7t +UCemDaYj+bvLpgcUQg== +-----END CERTIFICATE----- + +Equifax Secure CA +================= +-----BEGIN CERTIFICATE----- +MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJVUzEQMA4GA1UE 
+ChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5 +MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoT +B0VxdWlmYXgxLTArBgNVBAsTJEVxdWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPR +fM6fBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+AcJkVV5MW +8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kCAwEAAaOCAQkwggEFMHAG +A1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UE +CxMkRXF1aWZheCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoG +A1UdEAQTMBGBDzIwMTgwODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvS +spXXR9gjIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQFMAMB +Af8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUAA4GBAFjOKer89961 +zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y7qj/WsjTVbJmcVfewCHrPSqnI0kB +BIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee95 +70+sB3c4 +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G2 +============================================================ +-----BEGIN CERTIFICATE----- +MIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMB4XDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVT +MRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVz +dCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCO +FoUgRm1HP9SFIIThbbP4pO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71 +lSk8UOg013gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwIDAQAB +MA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSkU01UbSuvDV1Ai2TT +1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7iF6YM40AIOw7n60RzKprxaZLvcRTD +Oaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpYoJ2daZH9 +-----END CERTIFICATE----- + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC 
+X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +GlobalSign Root CA - R2 +======================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 +ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp +s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN +S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL +TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C +ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i +YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN +BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp +9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu +01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 +9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- + +ValiCert Class 1 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIy +MjM0OFoXDTE5MDYyNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9YLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIi +GQj4/xEjm84H9b9pGib+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCm +DuJWBQ8YTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0LBwG +lN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLWI8sogTLDAHkY7FkX +icnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPwnXS3qT6gpf+2SQMT2iLM7XGCK5nP +Orf1LXLI +-----END CERTIFICATE----- + +ValiCert Class 2 VA +=================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MTk1NFoXDTE5MDYyNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDOOnHK5avIWZJV16vYdA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVC +CSRrCl6zfN1SLUzm1NZ9WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7Rf +ZHM047QSv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9vUJSZ 
+SWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTuIYEZoDJJKPTEjlbV +UjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwCW/POuZ6lcg5Ktz885hZo+L7tdEy8 +W9ViH0Pd +-----END CERTIFICATE----- + +RSA Root Certificate 1 +====================== +-----BEGIN CERTIFICATE----- +MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRp +b24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs +YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZh +bGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAw +MjIzM1oXDTE5MDYyNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0 +d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMg +UG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0 +LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMIGfMA0GCSqGSIb3DQEBAQUA +A4GNADCBiQKBgQDjmFGWHOjVsQaBalfDcnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td +3zZxFJmP3MKS8edgkpfs2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89H +BFx1cQqYJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliEZwgs +3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJn0WuPIqpsHEzXcjF +V9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/APhmcGcwTTYJBtYze4D1gCCAPRX5r +on+jjBXu +-----END CERTIFICATE----- + +Verisign Class 3 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1 +EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc +cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw +EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj +055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f +j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0 +xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa +t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- + +Verisign Class 4 Public Primary Certification Authority - G3 +============================================================ +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw 
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy +dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDQgUHVibGljIFByaW1hcnkg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK3LpRFpxlmr8Y+1GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaS +tBO3IFsJ+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0GbdU6LM +8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLmNxdLMEYH5IBtptiW +Lugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XYufTsgsbSPZUd5cBPhMnZo0QoBmrX +Razwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA +j/ola09b5KROJ1WrIhVZPMq1CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXtt +mhwwjIDLk5Mqg6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm +fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c2NU8Qh0XwRJd +RTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/bLvSHgCwIe34QWKCudiyxLtG +UPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== +-----END CERTIFICATE----- + +Entrust.net Secure Server CA +============================ +-----BEGIN CERTIFICATE----- +MIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMCVVMxFDASBgNV +BAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5uZXQvQ1BTIGluY29ycC4gYnkg +cmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRl +ZDE6MDgGA1UEAxMxRW50cnVzdC5uZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eTAeFw05OTA1MjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIG +A1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBi +eSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1p +dGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQ +aO2f55M28Qpku0f1BBc/I0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5 +gXpa0zf3wkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OCAdcw +ggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHboIHYpIHVMIHSMQsw +CQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5l +dC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF +bnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu +dHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0MFqBDzIwMTkw +NTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8BdiE1U9s/8KAGv7UISX8+1i0Bow +HQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAaMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EA +BAwwChsEVjQuMAMCBJAwDQYJKoZIhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyN +Ewr75Ji174z4xRAN95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9 +n9cd2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI= +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl 
+MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Equifax Secure Global eBusiness CA +================================== +-----BEGIN CERTIFICATE----- +MIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBTZWN1cmUgR2xvYmFsIGVCdXNp +bmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIwMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMx +HDAaBgNVBAoTE0VxdWlmYXggU2VjdXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEds +b2JhbCBlQnVzaW5lc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRV +PEnCUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc58O/gGzN +qfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/o5brhTMhHD4ePmBudpxn +hcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAHMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0j +BBgwFoAUvqigdHJQa0S3ySPY+6j/s1draGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hs +MA0GCSqGSIb3DQEBBAUAA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okEN +I7SS+RkAZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv8qIY +NMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV +-----END CERTIFICATE----- + +Equifax Secure eBusiness CA 1 +============================= +-----BEGIN CERTIFICATE----- 
+MIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +RXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENB +LTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQwMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UE +ChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNz +IENBLTEwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ +1MRoRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBuWqDZQu4a +IZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKwEnv+j6YDAgMBAAGjZjBk +MBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kW +Nl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRKeDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQF +AAOBgQB1W6ibAxHm6VZMzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5 +lSE/9dR+WB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN/Bf+ +KpYrtWKmpj29f5JZzVoqgrI3eQ== +-----END CERTIFICATE----- + +AddTrust Low-Value Services Root +================================ +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRU +cnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMwMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQsw +CQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBO +ZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ulCDtbKRY6 +54eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6ntGO0/7Gcrjyvd7ZWxbWr +oulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyldI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1 +Zmne3yzxbrww2ywkEtvrNTVokMsAsJchPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJui +GMx1I4S+6+JNM3GOGvDC+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8w +HQYDVR0OBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBlMQswCQYDVQQGEwJT +RTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEw +HwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxt +ZBsfzQ3duQH6lmM0MkhHma6X7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0Ph +iVYrqW9yTkkz43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJlpz/+0WatC7xr +mYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOAWiFeIc9TVPC6b4nbqKqVz4vj +ccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- + +AddTrust External Root +====================== +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD +VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw +NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU +cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg +Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821 ++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw +Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo +aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy +2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7 +7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL 
+VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk +VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl +j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355 +e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u +G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- + +AddTrust Public Services Root +============================= +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSAwHgYDVQQDExdBZGRU +cnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAxMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJ +BgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5l +dHdvcmsxIDAeBgNVBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV6tsfSlbu +nyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nXGCwwfQ56HmIexkvA/X1i +d9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnPdzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSG +Aa2Il+tmzV7R/9x98oTaunet3IAIx6eH1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAw +HM+A+WD+eeSI8t0A65RF62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0G +A1UdDgQWBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29yazEgMB4G +A1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4 +JNojVhaTdt02KLmuG7jD8WS6IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL ++YPoRNWyQSW/iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao +GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh4SINhwBk/ox9 +Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQmXiLsks3/QppEIW1cxeMiHV9H +EufOX1362KqxMy3ZdvJOOjMMK7MtkAY= +-----END CERTIFICATE----- + +AddTrust Qualified Certificates Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSMwIQYDVQQDExpBZGRU +cnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcx +CzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQ +IE5ldHdvcmsxIzAhBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwqxBb/4Oxx +64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G87B4pfYOQnrjfxvM0PC3 +KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i2O+tCBGaKZnhqkRFmhJePp1tUvznoD1o +L/BLcHwTOK28FSXx1s6rosAx1i+f4P8UWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GR +wVY18BTcZTYJbqukB8c10cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HU +MIHRMB0GA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6FrpGkwZzELMAkGA1UE +BhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRUcnVzdCBUVFAgTmV0d29y +azEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlmaWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBABmrder4i2VhlRO6aQTvhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxG +GuoYQ992zPlmhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X 
+dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3P6CxB9bpT9ze +RXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9YiQBCYz95OdBEsIJuQRno3eDB +iFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5noxqE= +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +RSA Security 2048 v3 +==================== +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIQCgEBAQAAAnwAAAAKAAAAAjANBgkqhkiG9w0BAQUFADA6MRkwFwYDVQQK +ExBSU0EgU2VjdXJpdHkgSW5jMR0wGwYDVQQLExRSU0EgU2VjdXJpdHkgMjA0OCBWMzAeFw0wMTAy +MjIyMDM5MjNaFw0yNjAyMjIyMDM5MjNaMDoxGTAXBgNVBAoTEFJTQSBTZWN1cml0eSBJbmMxHTAb +BgNVBAsTFFJTQSBTZWN1cml0eSAyMDQ4IFYzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAt49VcdKA3XtpeafwGFAyPGJn9gqVB93mG/Oe2dJBVGutn3y+Gc37RqtBaB4Y6lXIL5F4iSj7 +Jylg/9+PjDvJSZu1pJTOAeo+tWN7fyb9Gd3AIb2E0S1PRsNO3Ng3OTsor8udGuorryGlwSMiuLgb +WhOHV4PR8CDn6E8jQrAApX2J6elhc5SYcSa8LWrg903w8bYqODGBDSnhAMFRD0xS+ARaqn1y07iH +KrtjEAMqs6FPDVpeRrc9DvV07Jmf+T0kgYim3WBU6JU2PcYJk5qjEoAAVZkZR73QpXzDuvsf9/UP ++Ky5tfQ3mBMY3oVbtwyCO4dvlTlYMNpuAWgXIszACwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQHw1EwpKrpRa41JPr/JCwz0LGdjDAdBgNVHQ4E +FgQUB8NRMKSq6UWuNST6/yQsM9CxnYwwDQYJKoZIhvcNAQEFBQADggEBAF8+hnZuuDU8TjYcHnmY +v/3VEhF5Ug7uMYm83X/50cYVIeiKAVQNOvtUudZj1LGqlk2iQk3UUx+LEN5/Zb5gEydxiKRz44Rj +0aRV4VCT5hsOedBnvEbIvz8XDZXmxpBp3ue0L96VfdASPz0+f00/FGj1EVDVwfSQpQgdMWD/YIwj +VAqv/qFuxdF6Kmh4zx6CCiC0H63lhbJqaHVOrSU3lIW+vaHU6rcMSzyd6BIA8F+sDeGscGNz9395 +nzIlQnQFgCi/vcEkllgVsRch6YlL2weIZ/QVrXA+L02FO8K32/6YaCOJ4XQP3vTFhGMpG8zLB8kA +pKnXwiJPZ9d37CAFYd4= +-----END CERTIFICATE----- + +GeoTrust Global CA +================== +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw 
+MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo +BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet +8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc +T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU +vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk +DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q +zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 +d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 +mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p +XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm +Mw== +-----END CERTIFICATE----- + +GeoTrust Global CA 2 +==================== +-----BEGIN CERTIFICATE----- +MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwHhcNMDQwMzA0MDUw +MDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j +LjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDvPE1APRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/ +NTL8Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hLTytCOb1k +LUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL5mkWRxHCJ1kDs6ZgwiFA +Vvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7S4wMcoKK+xfNAGw6EzywhIdLFnopsk/b +HdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNH +K266ZUapEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6tdEPx7 +srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv/NgdRN3ggX+d6Yvh +ZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywNA0ZF66D0f0hExghAzN4bcLUprbqL +OzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkC +x1YAzUm5s2x7UwQa4qjJqhIFI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqF +H4z1Ir+rzoPz4iIprn2DQKi6bA== +-----END CERTIFICATE----- + +GeoTrust Universal CA +===================== +-----BEGIN CERTIFICATE----- +MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 +MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu +Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t +JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e +RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs +7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d +8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V +qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga +Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB +Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu +KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 +ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 
+XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc +aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 +qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL +oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK +xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF +KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 +DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK +xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU +p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI +P/rmMuGNG2+k5o7Y+SlIis5z/iw= +-----END CERTIFICATE----- + +GeoTrust Universal CA 2 +======================= +-----BEGIN CERTIFICATE----- +MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN +R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 +MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg +SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 +DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 +j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q +JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a +QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 +WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP +20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn +ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC +SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG +8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 ++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z +dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ +4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ +mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq +A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg +Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP +pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d +FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp +gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm +X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS +-----END CERTIFICATE----- + +America Online Root Certification Authority 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lkhsmj76CG +v2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym1BW32J/X3HGrfpq/m44z 
+DyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsWOqMFf6Dch9Wc/HKpoH145LcxVR5lu9Rh +sCFg7RAycsWSJR74kEoYeEfffjA3PlAb2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP +8c9GsEsPPt2IYriMqQkoO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAUAK3Z +o/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBBQUAA4IBAQB8itEf +GDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkFZu90821fnZmv9ov761KyBZiibyrF +VL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAbLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft +3OJvx8Fi8eNy1gTIdGcL+oiroQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43g +Kd8hdIaC2y+CMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds +sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7 +-----END CERTIFICATE----- + +America Online Root Certification Authority 2 +============================================= +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEcMBoGA1UEChMT +QW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBPbmxpbmUgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkG +A1UEBhMCVVMxHDAaBgNVBAoTE0FtZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2Eg +T25saW5lIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC206B89en +fHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFciKtZHgVdEglZTvYYUAQv8 +f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2JxhP7JsowtS013wMPgwr38oE18aO6lhO +qKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JN +RvCAOVIyD+OEsnpD8l7eXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0 +gBe4lL8BPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67Xnfn +6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEqZ8A9W6Wa6897Gqid +FEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZo2C7HK2JNDJiuEMhBnIMoVxtRsX6 +Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnj +B453cMor9H124HhnAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3Op +aaEg5+31IqEjFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmnxPBUlgtk87FY +T15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2LHo1YGwRgJfMqZJS5ivmae2p ++DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzcccobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXg +JXUjhx5c3LqdsKyzadsXg8n33gy8CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//Zoy +zH1kUQ7rVyZ2OuMeIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgO +ZtMADjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2FAjgQ5ANh +1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUXOm/9riW99XJZZLF0Kjhf +GEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPbAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDff +Z4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQlZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuP +cX/9XhmgD0uRuMRUvAawRY8mkaKO/qk= +-----END CERTIFICATE----- + +Visa eCommerce Root +=================== +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG +EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug +QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2 +WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm +VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL +F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b 
+RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0 +TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI +/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs +GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc +CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW +YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz +zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu +YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- + +Certum Root CA +============== +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK +ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla +Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u +by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x +wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL +kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ +89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K +Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P +NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+ +GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg +GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/ +0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS +qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl +c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +Comodo Secure Services root 
+=========================== +-----BEGIN CERTIFICATE----- +MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAw +MDAwMFoXDTI4MTIzMTIzNTk1OVowfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFu +Y2hlc3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAi +BgNVBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPMcm3ye5drswfxdySRXyWP +9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3SHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstc +rbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rC +oznl2yY4rYsK7hljxxwk3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3V +p6ea5EQz6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNVHQ4E +FgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +gYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL1NlY3VyZUNlcnRpZmlj +YXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRwOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlm +aWNhdGVTZXJ2aWNlcy5jcmwwDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm +4J4oqF7Tt/Q05qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj +Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtIgKvcnDe4IRRL +DXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJaD61JlfutuC23bkpgHl9j6Pw +pCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDlizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1H +RR3B7Hzs/Sk= +-----END CERTIFICATE----- + +Comodo Trusted Services root +============================ +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEw +MDAwMDBaFw0yODEyMzEyMzU5NTlaMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1h +bmNoZXN0ZXIxEDAOBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUw +IwYDVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWWfnJSoBVC21ndZHoa0Lh7 +3TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMtTGo87IvDktJTdyR0nAducPy9C1t2ul/y +/9c3S0pgePfw+spwtOpZqqPOSC+pw7ILfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6 +juljatEPmsbS9Is6FARW1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsS +ivnkBbA7kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0GA1Ud +DgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21vZG9jYS5jb20vVHJ1c3RlZENlcnRp +ZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRodHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENl +cnRpZmljYXRlU2VydmljZXMuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8Ntw +uleGFTQQuS9/HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32 +pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxISjBc/lDb+XbDA +BHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+xqFx7D+gIIxmOom0jtTYsU0l +R+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/AtyjcndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O +9y5Xt5hwXsjEeLBi +-----END CERTIFICATE----- + +QuoVadis Root CA +================ +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE +ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz 
+MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp +cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD +EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk +J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL +F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL +YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen +AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w +PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y +ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 +MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj +YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs +ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW +Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu +BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw +FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 +tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo +fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul +LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x +gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi +5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi +5nrQNiOKSnQ2+Q== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW 
+IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx +OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd 
+DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +Sonera Class 2 Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG +U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw +NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh +IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 +/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT +dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG +f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P +tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH +nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT +XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt +0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI +cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph +Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx +EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH +llpwrN9M +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA +============================= +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgIEAJiWijANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJOTDEeMBwGA1UE +ChMVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSYwJAYDVQQDEx1TdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQTAeFw0wMjEyMTcwOTIzNDlaFw0xNTEyMTYwOTE1MzhaMFUxCzAJBgNVBAYTAk5MMR4w +HAYDVQQKExVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xJjAkBgNVBAMTHVN0YWF0IGRlciBOZWRlcmxh +bmRlbiBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmNK1URF6gaYUmHFt +vsznExvWJw56s2oYHLZhWtVhCb/ekBPHZ+7d89rFDBKeNVU+LCeIQGv33N0iYfXCxw719tV2U02P +jLwYdjeFnejKScfST5gTCaI+Ioicf9byEGW07l8Y1Rfj+MX94p2i71MOhXeiD+EwR+4A5zN9RGca +C1Hoi6CeUJhoNFIfLm0B8mBF8jHrqTFoKbt6QZ7GGX+UtFE5A3+y3qcym7RHjm+0Sq7lr7HcsBth +vJly3uSJt3omXdozSVtSnA71iq3DuD3oBmrC1SoLbHuEvVYFy4ZlkuxEK7COudxwC0barbxjiDn6 +22r+I/q85Ej0ZytqERAhSQIDAQABo4GRMIGOMAwGA1UdEwQFMAMBAf8wTwYDVR0gBEgwRjBEBgRV +HSAAMDwwOgYIKwYBBQUHAgEWLmh0dHA6Ly93d3cucGtpb3ZlcmhlaWQubmwvcG9saWNpZXMvcm9v +dC1wb2xpY3kwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSofeu8Y6R0E3QA7Jbg0zTBLL9s+DAN +BgkqhkiG9w0BAQUFAAOCAQEABYSHVXQ2YcG70dTGFagTtJ+k/rvuFbQvBgwp8qiSpGEN/KtcCFtR +EytNwiphyPgJWPwtArI5fZlmgb9uXJVFIGzmeafR2Bwp/MIgJ1HI8XxdNGdphREwxgDS1/PTfLbw +MVcoEoJz6TMvplW0C5GUR5z6u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3y +nGQI0DvDKcWy7ZAEwbEpkcUwb8GpcjPM/l0WFywRaed+/sWDCN+83CI6LiBpIzlWYGeQiy52OfsR +iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== +-----END CERTIFICATE----- + +TDC Internet Root CA +==================== +-----BEGIN CERTIFICATE----- +MIIEKzCCAxOgAwIBAgIEOsylTDANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJESzEVMBMGA1UE +ChMMVERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTAeFw0wMTA0MDUx +NjMzMTdaFw0yMTA0MDUxNzAzMTdaMEMxCzAJBgNVBAYTAkRLMRUwEwYDVQQKEwxUREMgSW50ZXJu 
+ZXQxHTAbBgNVBAsTFFREQyBJbnRlcm5ldCBSb290IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxLhAvJHVYx/XmaCLDEAedLdInUaMArLgJF/wGROnN4NrXceO+YQwzho7+vvOi20j +xsNuZp+Jpd/gQlBn+h9sHvTQBda/ytZO5GhgbEaqHF1j4QeGDmUApy6mcca8uYGoOn0a0vnRrEvL +znWv3Hv6gXPU/Lq9QYjUdLP5Xjg6PEOo0pVOd20TDJ2PeAG3WiAfAzc14izbSysseLlJ28TQx5yc +5IogCSEWVmb/Bexb4/DPqyQkXsN/cHoSxNK1EKC2IeGNeGlVRGn1ypYcNIUXJXfi9i8nmHj9eQY6 +otZaQ8H/7AQ77hPv01ha/5Lr7K7a8jcDR0G2l8ktCkEiu7vmpwIDAQABo4IBJTCCASEwEQYJYIZI +AYb4QgEBBAQDAgAHMGUGA1UdHwReMFwwWqBYoFakVDBSMQswCQYDVQQGEwJESzEVMBMGA1UEChMM +VERDIEludGVybmV0MR0wGwYDVQQLExRUREMgSW50ZXJuZXQgUm9vdCBDQTENMAsGA1UEAxMEQ1JM +MTArBgNVHRAEJDAigA8yMDAxMDQwNTE2MzMxN1qBDzIwMjEwNDA1MTcwMzE3WjALBgNVHQ8EBAMC +AQYwHwYDVR0jBBgwFoAUbGQBx/2FbazI2p5QCIUItTxWqFAwHQYDVR0OBBYEFGxkAcf9hW2syNqe +UAiFCLU8VqhQMAwGA1UdEwQFMAMBAf8wHQYJKoZIhvZ9B0EABBAwDhsIVjUuMDo0LjADAgSQMA0G +CSqGSIb3DQEBBQUAA4IBAQBOQ8zR3R0QGwZ/t6T609lN+yOfI1Rb5osvBCiLtSdtiaHsmGnc540m +gwV5dOy0uaOXwTUA/RXaOYE6lTGQ3pfphqiZdwzlWqCE/xIWrG64jcN7ksKsLtB9KOy282A4aW8+ +2ARVPp7MVdK6/rtHBNcK2RYKNCn1WBPVT8+PVkuzHu7TmHnaCB4Mb7j4Fifvwm899qNLPg7kbWzb +O0ESm70NRyN/PErQr8Cv9u8btRXE64PECV90i9kR+8JWsTz4cMo0jUNAE4z9mQNUecYu6oah9jrU +Cbz0vGbMPVjQV0kK7iXiQe4T+Zs4NNEA9X7nlB38aQNiuJkFBT1reBK9sG9l +-----END CERTIFICATE----- + +UTN DATACorp SGC Root CA +======================== +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCBkzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZ +BgNVBAMTElVUTiAtIERBVEFDb3JwIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBa +MIGTMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4w +HAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRy +dXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ys +raP6LnD43m77VkIVni5c7yPeIbkFdicZD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlo +wHDyUwDAXlCCpVZvNvlK4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA +9P4yPykqlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulWbfXv +33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQABo4GrMIGoMAsGA1Ud +DwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRTMtGzz3/64PGgXYVOktKeRR20TzA9 +BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dD +LmNybDAqBgNVHSUEIzAhBggrBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3 +DQEBBQUAA4IBAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyjj98C5OBxOvG0 +I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVHKWss5nbZqSl9Mt3JNjy9rjXx +EZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwP +DPafepE39peC4N1xaf92P2BNPM/3mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- + +UTN USERFirst Hardware Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCBlzELMAkGA1UE +BhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEeMBwGA1UEChMVVGhl +IFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAd +BgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgx +OTIyWjCBlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0 +eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8vd3d3LnVz +ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 
+DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI +wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd +tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 +i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf +Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw +gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF +UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF +BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW +XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 +lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn +iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 +nfhmqA== +-----END CERTIFICATE----- + +Camerfirma Chambers of Commerce Root +==================================== +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx +NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp +cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn +MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU +xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH +NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW +DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV +d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud +EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v +cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P +AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh +bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD +VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi +fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD +L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN +UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n +ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 +erfutGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- + +Camerfirma Global Chambersign Root +================================== +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe +QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i +ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx +NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt +YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg +MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw +ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J 
+1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O
+by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl
+6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c
+8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/
+BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j
+aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B
+Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj
+aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y
+ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh
+bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA
+PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y
+gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ
+PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4
+IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes
+t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A==
+-----END CERTIFICATE-----
+
+NetLock Notary (Class A) Root
+=============================
+-----BEGIN CERTIFICATE-----
+MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI
+EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6
+dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j
+ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX
+DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH
+EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD
+VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz
+cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM
+D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ
+z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC
+/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7
+tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6
+4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG
+A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC
+Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv
+bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu
+IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn
+LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0
+ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz
+IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh
+IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu
+b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh
+bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg
+Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp
+bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5
+ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP
+ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB
+CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr
+KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM
+8CgHrTwXZoi1/baI
+-----END CERTIFICATE-----
+
+NetLock Business (Class B) Root
+===============================
+-----BEGIN CERTIFICATE-----
+MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT
+CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV
+BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg
+VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD
+VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv
+bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg
+VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S
+o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr
+1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV
+HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ
+RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh
+dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0
+ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv
+c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg
+YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh
+c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz
+Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA
+bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl
+IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2
+YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj
+cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM
+43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR
+stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI
+-----END CERTIFICATE-----
+
+NetLock Express (Class C) Root
+==============================
+-----BEGIN CERTIFICATE-----
+MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT
+CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV
+BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD
+KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6
+dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j
+ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z
+W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63
+euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw
+DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN
+RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn
+YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB
+IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i
+aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0
+ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs
+ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo
+dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y
+emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k +IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ +UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg +YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 +xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW +gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= 
+-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG +SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj +YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH +AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw +Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg +U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 +LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh 
+cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT +dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC +AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh +3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm +vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk +fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 +fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ +EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl +1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ +lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro +g14= +-----END CERTIFICATE----- + +Taiwan GRCA +=========== +-----BEGIN CERTIFICATE----- +MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG +EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X +DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv +dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN +w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 +BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O +1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO +htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov +J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 +Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t +B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB +O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 +lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV +HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 +09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ +TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj +Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 +Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU +D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz +DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk +Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk +7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ +CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy ++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS +-----END CERTIFICATE----- + +Swisscom Root CA 1 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 +MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM +MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF 
+NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe +AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC +b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn +7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN +cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp +WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 +haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY +MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 +MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn +jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ +MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H +VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl +vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl +OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 +1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq +nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy +x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW +NY6E0F/6MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn 
+TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF +BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +Certplus Class 2 Primary CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE +BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN +OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy +dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR +5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ +Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO +YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e +e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME +CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ +YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t +L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD +P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R +TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ 
+7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW +//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- + +DST Root CA X3 +============== +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK +ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X +DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 +cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT +rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 +UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy +xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d +utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ +MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug +dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE +GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw +RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS +fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- + +DST ACES CA X6 +============== +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT +MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha +MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE +CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI +DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa +pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow +GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy +MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu +Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy +dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU +CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 +5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t +Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs +vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 +oKfN5XozNmr6mis= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 1 +============================================== +-----BEGIN CERTIFICATE----- +MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP +MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 +acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx +MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg +U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB 
+TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC +aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX +yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i +Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ +8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 +W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME +BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 +sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE +q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy +B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY +nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2 +============================================== +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP +MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN +MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr +dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G +A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls +acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe +LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI +x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g +QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr +5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB +AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt +Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 +Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ +hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P +9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 +UrbnBEI= +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO 
+peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m +Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG 
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx +CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ +cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN +b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 +nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge +RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt +tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI +hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K +Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN +NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa +Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG +1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- + +thawte Primary Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 +aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 +MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg +SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv +KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT +FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs +oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ +1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc +q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K +aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p +afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF +AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE +uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 +jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH +z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== +-----END CERTIFICATE----- + +VeriSign Class 3 Public Primary Certification Authority - G5 +============================================================ +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln +biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh +dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt 
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz +j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD +Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ +Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r +fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ +BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv +Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG +SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ +X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE +KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC +Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE +ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj +aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA 
+MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +WellsSecure Public Root Certificate Authority 
+============================================= +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM +F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw +NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN +MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl +bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD +VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 +iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 +i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 +bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB +K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB +AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu +cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm +lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB +i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww +GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI +K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 +bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj +qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es +E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ +tylv2G0xffX8oRAHh84vWdw+WNs= +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +IGC/A +===== +-----BEGIN CERTIFICATE----- +MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD +VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE +Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy +MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI +EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT +STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 +TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW +So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy 
+HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd +frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ +tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB +egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC +iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK +q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q +MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg +Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI +lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF +0mBWWg== +-----END CERTIFICATE----- + +Security Communication EV RootCA1 +================================= +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE +BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl +Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO +/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX +WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z +ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 +bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK +9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG +SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm +iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG +Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW +mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW +T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GA CA +=============================== +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE +BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG +A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH +bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD +VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw +IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 +IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 +Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg +Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD +d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ +/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R +LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm +MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 ++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY 
+okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE +BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL +EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 +MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz +dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT +GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG +d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N +oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc +QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ +PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb +MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG +IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD +VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 +LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A +dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn +AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA +4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg +AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA +egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 +Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO +PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv +c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h +cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw +IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT +WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV +MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER +MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp +Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal +HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT +nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE +aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a +86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK +yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB +S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg 
+DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ +ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. +====================================== +-----BEGIN CERTIFICATE----- +MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT +AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg +LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w +HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ +U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh +IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN +yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU +2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 +4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP +2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm +8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf +HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa +Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK +5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b +czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g +ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF +BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug +cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf +AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX +EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v +/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 +MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 +3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk +eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f +/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h +RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU +Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 2 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw 
+MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw +IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 +xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ +Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u +SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G +dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ +KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj +TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP +JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk +vQ== +-----END CERTIFICATE----- + +TC TrustCenter Class 3 CA II +============================ +-----BEGIN CERTIFICATE----- +MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy +IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw +MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 +c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE +AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W +yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo +6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ +uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk +2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB +7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 +Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU +cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i +SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u +TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE +O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 +yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 +IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal +092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc +5A== +-----END CERTIFICATE----- + +TC TrustCenter Universal CA I +============================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC +REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy +IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN 
+MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg +VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw +JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC +qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv +xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw +ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O +gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j +BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG +1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy +vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 +ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT +ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a +7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY +-----END CERTIFICATE----- + +Deutsche Telekom Root CA 2 +========================== +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT +RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG +A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 +MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G +A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS +b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 +bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI +KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY +AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK +Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV +jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV +HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr +E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy +zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 +rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G +dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- + +ComSign Secured CA +================== +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE +AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w +NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD +QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs +49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH +7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB +kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 +9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw +AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t +U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA +j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC 
+AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a +BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp +FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP +51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- + +Cybertrust Global Root +====================== +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li +ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 +MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD +ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA ++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW +0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL +AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin +89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT +8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 +MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G +A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO +lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi +5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 +hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T +X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW +WL1WMRJOEcgh4LMRkWXbtKaIOM5V +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r 
+GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 +============================================================================================================================= +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH +DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q +aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry +b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV +BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg +S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 +MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl +IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF +n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl +IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft +dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl +cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO +Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 +xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR +6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd +BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 +N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT +y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh +LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M +dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= +-----END CERTIFICATE----- + +Buypass Class 2 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 +MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M +cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 +0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 +0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R +uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV +1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt 
+7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 +fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w +wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho +-----END CERTIFICATE----- + +Buypass Class 3 CA 1 +==================== +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 +MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh +c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx +ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 +n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia +AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c +1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P +AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 +pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA +EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 +htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj +el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 +-----END CERTIFICATE----- + +EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 +========================================================================== +-----BEGIN CERTIFICATE----- +MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg +QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe +Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt +IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by +X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b +gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr +eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ +TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy +Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn +uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI +qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm +ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 +Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW +Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t +FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm +zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k +XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT +bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU +RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK 
+1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt +2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ +Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 +AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt +vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +CNNIC ROOT +========== +-----BEGIN CERTIFICATE----- +MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE +ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw +OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD +o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz +VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT +VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or +czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK +y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC +wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S +lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 +Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM +O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 +BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 +G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m +mxE= +-----END CERTIFICATE----- + +ApplicationCA - Japanese Government +=================================== +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT +SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw +MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl +cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 +fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN +wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE +jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu 
+nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU +WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV +BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD +vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs +o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g +/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD +io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW +dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL +rosot4LKGAfmt1t06SAZf7IbiVQ= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G3 +============================================= +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE +BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy +eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz +NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo +YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT +LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j +K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE +c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C +IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu +dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr +2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 +cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE +Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s +t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- + +thawte Primary Root CA - G2 +=========================== +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC +VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu +IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg +Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV +MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG +b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt +IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS +LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 +8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU +mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN +G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K +rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- + +thawte Primary Root CA - G3 +=========================== +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE +BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 
+aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w +ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD +VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG +A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At +P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC ++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY +7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW +vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ +KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK +A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC +8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm +er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- + +GeoTrust Primary Certification Authority - G2 +============================================= +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 +OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl +b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG +BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc +KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ +EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m +ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 +npaqBA+K +-----END CERTIFICATE----- + +VeriSign Universal Root Certification Authority +=============================================== +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE +BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO +ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk +IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV +UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv +cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj +1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP +MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 +9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I 
+AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR
+tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G
+CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O
+a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3
+Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx
+Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx
+P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P
+wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4
+mJO37M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+VeriSign Class 3 Public Primary Certification Authority - G4
+============================================================
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3
+b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz
+ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo
+b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5
+IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8
+Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz
+rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw
+HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u
+Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD
+A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx
+AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+NetLock Arany (Class Gold) Főtanúsítvány
+============================================
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
+A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
+dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
+cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO
+ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6
+c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu
+0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw
+/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk
+H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw
+fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1
+neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW
+qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta
+YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Staat der Nederlanden Root CA - G2 +================================== +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC +TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l +ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ +5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn +vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj +CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil +e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR +OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI +CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 +48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi +trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 +qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB +AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC +ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA +A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz ++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj +f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN +kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk +CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF +URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb +CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h +oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV +IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm +66+KAQ== +-----END CERTIFICATE----- + +CA Disig +======== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK +QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw +MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz +bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm +GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD +Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo +hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt +ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w +gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P +AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz +aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff +ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa +BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t 
+WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 +mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ +CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K +ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA +4Z7CRneC9VkGjCFMhwnN5ag= +-----END CERTIFICATE----- + +Juur-SK +======= +-----BEGIN CERTIFICATE----- +MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA +c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw +DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG +SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy +aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf +TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC ++Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw +UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa +Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF +MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD +HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh +AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA +cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr +AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw +cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE +FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G +A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo +ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL +abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 +IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh +Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 +yyqcjg== +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi 
+SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO +wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +ACEDICOM Root +============= +-----BEGIN CERTIFICATE----- +MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD +T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 +MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG +A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk +WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD +YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew +MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb +m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk +HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT +xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 +3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 +2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq +TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz +4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU +9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv +bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg +aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP +eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk +zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 +ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI +KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq +nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE +I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp +MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o +tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv 
+c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi +=================================================== +-----BEGIN CERTIFICATE----- +MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz +ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 +MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 +cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u +aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY +8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y +jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI +JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk +9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD +AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG +SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d +F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq +D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 +Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq +fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 
+xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 +YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT 
+OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID +AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Chambers of Commerce Root - 2008 +================================ +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy +Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl +ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF +EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl +cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA +XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj +h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ +ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk +NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g +D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 +lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ +0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 +EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI +G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ +BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh +bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh +bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC +CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH +AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 +wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH 
+3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU +RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 +M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 +YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF +9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK +zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG +nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ +-----END CERTIFICATE----- + +Global Chambersign Root - 2008 +============================== +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD +MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv +bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu +QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx +NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg +Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ +QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf +VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf +XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 +ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB +/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA +TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M +H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe +Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF +HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB +AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT +BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE +BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm +aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm +aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp +1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 +dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG +/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 +ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s +dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg +9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH +foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du +qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr +P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq +c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT 
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU +JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 
+h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy 
+OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ +N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB 
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +Certinomis - Autorité Racine +============================= +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK +Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg +LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG +A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw +JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa +wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly +Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw +2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N +jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q +c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC +lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb +xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g +530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna +4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x +WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva +R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 +nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B +CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv +JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE +qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b +WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE +wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ +vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- + +Root CA Generalitat Valenciana +============================== +-----BEGIN CERTIFICATE----- +MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE +ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 +IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 +WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE +CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 +F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B +ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ +D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte +JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB +AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n 
+dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB +ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl +AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA +YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy +AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA +aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt +AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA +YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu +AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA +OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 +dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV +BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G +A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S +b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh +TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz +Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 +NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH +iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt ++GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= +-----END CERTIFICATE----- + +A-Trust-nQual-03 +================ +-----BEGIN CERTIFICATE----- +MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE +Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy +a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R +dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw +RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 +ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 +c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA +zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n +yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE +SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 +iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V +cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV +eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 +ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr +sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd +JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS +mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 +ahq97BvIxYSazQ== +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC 
+oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy +aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +EC-ACC +====== +-----BEGIN CERTIFICATE----- +MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE +BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w +ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD +VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE +CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT +BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 +MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt +SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl +Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh +cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK +w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT +ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 +HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a +E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw +0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD +VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 
+Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l +dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ +lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa +Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe +l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 +E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D +5EI= +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2011 +======================================================= +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT +O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y +aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT +AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z +IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo +IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI +1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa +71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u +8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH +3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ +MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 +MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu +b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt +XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD +/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N +7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt 
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Trustis FPS Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 +IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV +BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ +RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk +H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa +cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt +o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA +AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd +BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c +GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC +yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P +8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV +l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl +iB6XzCGcKQENZetX2fNXlrtIzYE= +-----END CERTIFICATE----- + +StartCom Certification Authority +================================ +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu +ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 +NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk +LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg +U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y +o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ +Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d +eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt +2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z +6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ +osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ +untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc +UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT +37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ 
+Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 +dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu +c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv +bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 +aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t +L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG +cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 +fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm +N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN +Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T +tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX +e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA +2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs +HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib +D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= +-----END CERTIFICATE----- + +StartCom Certification Authority G2 +=================================== +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN +U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE +ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O +o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG +4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi +Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul +Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs +O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H +vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L +nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS +FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa +z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ +KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk +J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ +JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG +/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc +nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld +blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc +l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm +7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm +obp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU 
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 +g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz 
+6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +EE Certification Centre Root CA +=============================== +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG +EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy +dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw +MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB +UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy +ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM +TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 +rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw +93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN +P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ +MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF +BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj +xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM +lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU +3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM +dcGWxZ0= +-----END CERTIFICATE----- + +TURKTRUST Certificate Services Provider Root 2007 +================================================= +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF +bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP 
+MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg +QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X +DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl +a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN +BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp +bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N +YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv +KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya +KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT +rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC +AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s +Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO +Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb +BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK +poRq0Tl9 +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS +egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh 
+zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +PSCProcert +========== +-----BEGIN CERTIFICATE----- +MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk +ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ +MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz +dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl +cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw +IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw +MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w +DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD +ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp +Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC +wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA +3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh +RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO +EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2 +0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH +0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU +td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw +Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp +r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/ +AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz +Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId +xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp +ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH +EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h +Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k +ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG +9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG +MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG +LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52 +ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy 
+YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v +Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o +dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq +T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN +g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q +uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1 +n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn +FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo +5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq +3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5 +poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y +eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km +-----END CERTIFICATE----- + +China Internet Network Information Center EV Certificates Root +============================================================== +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgIESJ8AATANBgkqhkiG9w0BAQUFADCBijELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyMUcwRQYDVQQDDD5D +aGluYSBJbnRlcm5ldCBOZXR3b3JrIEluZm9ybWF0aW9uIENlbnRlciBFViBDZXJ0aWZpY2F0ZXMg +Um9vdDAeFw0xMDA4MzEwNzExMjVaFw0zMDA4MzEwNzExMjVaMIGKMQswCQYDVQQGEwJDTjEyMDAG +A1UECgwpQ2hpbmEgSW50ZXJuZXQgTmV0d29yayBJbmZvcm1hdGlvbiBDZW50ZXIxRzBFBgNVBAMM +PkNoaW5hIEludGVybmV0IE5ldHdvcmsgSW5mb3JtYXRpb24gQ2VudGVyIEVWIENlcnRpZmljYXRl +cyBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAm35z7r07eKpkQ0H1UN+U8i6y +jUqORlTSIRLIOTJCBumD1Z9S7eVnAztUwYyZmczpwA//DdmEEbK40ctb3B75aDFk4Zv6dOtouSCV +98YPjUesWgbdYavi7NifFy2cyjw1l1VxzUOFsUcW9SxTgHbP0wBkvUCZ3czY28Sf1hNfQYOL+Q2H +klY0bBoQCxfVWhyXWIQ8hBouXJE0bhlffxdpxWXvayHG1VA6v2G5BY3vbzQ6sm8UY78WO5upKv23 +KzhmBsUs4qpnHkWnjQRmQvaPK++IIGmPMowUc9orhpFjIpryp9vOiYurXccUwVswah+xt54ugQEC +7c+WXmPbqOY4twIDAQABo2MwYTAfBgNVHSMEGDAWgBR8cks5x8DbYqVPm6oYNJKiyoOCWTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfHJLOcfA22KlT5uqGDSSosqD +glkwDQYJKoZIhvcNAQEFBQADggEBACrDx0M3j92tpLIM7twUbY8opJhJywyA6vPtI2Z1fcXTIWd5 +0XPFtQO3WKwMVC/GVhMPMdoG52U7HW8228gd+f2ABsqjPWYWqJ1MFn3AlUa1UeTiH9fqBk1jjZaM +7+czV0I664zBechNdn3e9rG3geCg+aF4RhcaVpjwTj2rHO3sOdwHSPdj/gauwqRcalsyiMXHM4Ws +ZkJHwlgkmeHlPuV1LI5D1l08eB6olYIpUNHRFrrvwb562bTYzB5MRuF3sTGrvSrIzo9uoV1/A3U0 +5K2JRVRevq4opbs/eHnrc7MKDf2+yfdWrPa37S+bISnHOLaVxATywy39FCqQmbkHzJ8= +-----END CERTIFICATE----- + +Swisscom Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBkMQswCQYDVQQG +EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy +dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2 +MjUwNzM4MTRaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln +aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvErjw0DzpPM +LgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r0rk0X2s682Q2zsKwzxNo +ysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJ +wDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVPACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpH +Wrumnf2U5NGKpV+GY3aFy6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1a +SgJA/MTAtukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL6yxS +NLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0uPoTXGiTOmekl9Ab 
+mbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrALacywlKinh/LTSlDcX3KwFnUey7QY +Ypqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velhk6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3 +qPyZ7iVNTA6z00yPhOgpD/0QVAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw +HQYDVR0hBBYwFDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqhb97iEoHF8Twu +MA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4RfbgZPnm3qKhyN2abGu2sEzsO +v2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ +82YqZh6NM4OKb3xuqFp1mrjX2lhIREeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLz +o9v/tdhZsnPdTSpxsrpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcs +a0vvaGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciATwoCqISxx +OQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99nBjx8Oto0QuFmtEYE3saW +mA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5Wt6NlUe07qxS/TFED6F+KBZvuim6c779o ++sjaC+NCydAXFJy3SuCvkychVSa1ZC+N8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TC +rvJcwhbtkj6EPnNgiLx29CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX +5OfNeOI5wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- + +Swisscom Root EV CA 2 +===================== +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAwZzELMAkGA1UE +BhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdpdGFsIENlcnRpZmljYXRlIFNl +cnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcN +MzEwNjI1MDg0NTA4WjBnMQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsT +HERpZ2l0YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYg +Q0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7BxUglgRCgz +o3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD1ycfMQ4jFrclyxy0uYAy +Xhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPHoCE2G3pXKSinLr9xJZDzRINpUKTk4Rti +GZQJo/PDvO/0vezbE53PnUgJUmfANykRHvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8Li +qG12W0OfvrSdsyaGOx9/5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaH +Za0zKcQvidm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHLOdAG +alNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaCNYGu+HuB5ur+rPQa +m3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f46Fq9mDU5zXNysRojddxyNMkM3Ox +bPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCBUWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDi +xzgHcgplwLa7JSnaFp6LNYth7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWBbj2ITY1x0kbB +bkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6xXCX5145v9Ydkn+0UjrgEjihL +j6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98TPLr+flaYC/NUn81ETm484T4VvwYmneTwkLbU +wp4wLh/vx3rEUMfqe9pQy3omywC0Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7 +XwgiG/W9mR4U9s70WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH +59yLGn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm7JFe3VE/ +23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4Snr8PyQUQ3nqjsTzyP6Wq +J3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VNvBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyA +HmBR3NdUIR7KYndP+tiPsys6DXhyyWhBWkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/gi +uMod89a2GQ+fYWVq6nTIfI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuW +l8PVP3wbI+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- + +CA Disig Root R1 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp 
+ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy +3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8 +u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2 +m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk +CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa +YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6 +vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL +LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX +ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is +XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ +04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B +LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM +CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb +VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85 +YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS +ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix +lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N +UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ +a7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 
+utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg 
+Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI +zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority 
+=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf 
+vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr +54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- diff --git a/ext/boto/cloudformation/__init__.py b/ext/boto/cloudformation/__init__.py new file mode 100644 index 0000000000..fec1e5e0a2 --- /dev/null +++ b/ext/boto/cloudformation/__init__.py @@ -0,0 +1,55 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.cloudformation.connection import CloudFormationConnection +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.regioninfo import connect + +RegionData = load_regions().get('cloudformation') + + +def regions(): + """ + Get all available regions for the CloudFormation service. 
+ + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions( + 'cloudformation', + connection_cls=CloudFormationConnection + ) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.cloudformation.CloudFormationConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.cloudformation.CloudFormationConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('cloudformation', region_name, + connection_cls=CloudFormationConnection, **kw_params) diff --git a/ext/boto/cloudformation/connection.py b/ext/boto/cloudformation/connection.py new file mode 100644 index 0000000000..ee850b30ca --- /dev/null +++ b/ext/boto/cloudformation/connection.py @@ -0,0 +1,922 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.cloudformation.stack import Stack, StackSummary, StackEvent +from boto.cloudformation.stack import StackResource, StackResourceSummary +from boto.cloudformation.template import Template +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.compat import json + + +class CloudFormationConnection(AWSQueryConnection): + """ + AWS CloudFormation + AWS CloudFormation enables you to create and manage AWS + infrastructure deployments predictably and repeatedly. AWS + CloudFormation helps you leverage AWS products such as Amazon EC2, + EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable, + highly scalable, cost effective applications without worrying + about creating and configuring the underlying AWS infrastructure. + + With AWS CloudFormation, you declare all of your resources and + dependencies in a template file. The template defines a collection + of resources as a single unit called a stack. AWS CloudFormation + creates and deletes all member resources of the stack together and + manages all dependencies between the resources for you. + + For more information about this product, go to the `CloudFormation + Product Page`_. + + Amazon CloudFormation makes use of other AWS products. If you need + additional technical information about a specific AWS product, you + can find the product's technical documentation at + `http://aws.amazon.com/documentation/`_. 
+ """ + APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15') + DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint', + 'cloudformation.us-east-1.amazonaws.com') + + valid_states = ( + 'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE', + 'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE', + 'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE', + 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS', + 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS', + 'UPDATE_ROLLBACK_FAILED', + 'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS', + 'UPDATE_ROLLBACK_COMPLETE') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, security_token=None, validate_certs=True, + profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, CloudFormationConnection) + self.region = region + super(CloudFormationConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def encode_bool(self, v): + v = bool(v) + return {True: "true", False: "false"}[v] + + def _build_create_or_update_params(self, stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, on_failure, stack_policy_body, + stack_policy_url, tags, use_previous_template=None, + stack_policy_during_update_body=None, + stack_policy_during_update_url=None): + """ + Helper that creates JSON parameters needed by a Stack Create or + Stack Update call. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass either `UsePreviousTemplate` or one of + `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and + `TemplateUrl` are passed, only `TemplateBody` is used. + `TemplateBody`. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass either `UsePreviousTemplate` or one of + `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and + `TemplateUrl` are passed, only `TemplateBody` is used. + `TemplateBody`. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. A 3-tuple (key, value, bool) may be used to + specify the `UsePreviousValue` option. 
+
+        :type disable_rollback: boolean
+        :param disable_rollback: Set to `True` to disable rollback of the stack
+            if stack creation failed. You can specify either `DisableRollback`
+            or `OnFailure`, but not both.
+            Default: `False`
+
+        :type timeout_in_minutes: integer
+        :param timeout_in_minutes: The amount of time that can pass before the
+            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+            or is set to `False`, the stack will be rolled back.
+
+        :type notification_arns: list
+        :param notification_arns: The Simple Notification Service (SNS) topic
+            ARNs to publish stack related events. You can find your SNS topic
+            ARNs using the `SNS console`_ or your Command Line Interface (CLI).
+
+        :type capabilities: list
+        :param capabilities: The list of capabilities that you want to allow in
+            the stack. If your template contains certain resources, you must
+            specify the CAPABILITY_IAM value for this parameter; otherwise,
+            this action returns an InsufficientCapabilities error. The
+            following resources require you to specify the capabilities
+            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+            `AWS::IAM::UserToGroupAddition`_.
+
+        :type on_failure: string
+        :param on_failure: Determines what action will be taken if stack
+            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+            DELETE. You can specify either `OnFailure` or `DisableRollback`,
+            but not both.
+            Default: `ROLLBACK`
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            If you pass `StackPolicyBody` and `StackPolicyURL`, only
+            `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack. If you pass
+            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+            used.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
+
+        :type stack_policy_during_update_body: string
+        :param stack_policy_during_update_body: Structure containing the
+            temporary overriding stack policy body. If you pass
+            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+            only `StackPolicyDuringUpdateBody` is used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update. If you do not specify a
+            stack policy, the current policy that is associated with the stack
+            will be used.
+
+        :type stack_policy_during_update_url: string
+        :param stack_policy_during_update_url: Location of a file containing
+            the temporary overriding stack policy. The URL must point to a
+            policy (max size: 16KB) located in an S3 bucket in the same region
+            as the stack.
If you pass `StackPolicyDuringUpdateBody` and + `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is + used. + If you want to update protected resources, specify a temporary + overriding stack policy during this update. If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. + + :rtype: dict + :return: JSON parameters represented as a Python dict. + """ + params = {'ContentType': "JSON", 'StackName': stack_name, + 'DisableRollback': self.encode_bool(disable_rollback)} + if template_body: + params['TemplateBody'] = template_body + if template_url: + params['TemplateURL'] = template_url + if use_previous_template is not None: + params['UsePreviousTemplate'] = self.encode_bool(use_previous_template) + if template_body and template_url: + boto.log.warning("If both TemplateBody and TemplateURL are" + " specified, only TemplateBody will be honored by the API") + if parameters and len(parameters) > 0: + for i, parameter_tuple in enumerate(parameters): + key, value = parameter_tuple[:2] + use_previous = (parameter_tuple[2] + if len(parameter_tuple) > 2 else False) + params['Parameters.member.%d.ParameterKey' % (i + 1)] = key + if use_previous: + params['Parameters.member.%d.UsePreviousValue' + % (i + 1)] = self.encode_bool(use_previous) + else: + params['Parameters.member.%d.ParameterValue' % (i + 1)] = value + + if capabilities: + for i, value in enumerate(capabilities): + params['Capabilities.member.%d' % (i + 1)] = value + if tags: + for i, (key, value) in enumerate(tags.items()): + params['Tags.member.%d.Key' % (i + 1)] = key + params['Tags.member.%d.Value' % (i + 1)] = value + if notification_arns and len(notification_arns) > 0: + self.build_list_params(params, notification_arns, + "NotificationARNs.member") + if timeout_in_minutes: + params['TimeoutInMinutes'] = int(timeout_in_minutes) + if disable_rollback is not None: + params['DisableRollback'] = str( + disable_rollback).lower() + if on_failure is not None: + params['OnFailure'] = on_failure + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + if stack_policy_during_update_body is not None: + params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body + if stack_policy_during_update_url is not None: + params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url + return params + + def _do_request(self, call, params, path, method): + """ + Do a request via ``self.make_request`` and parse the JSON response. + + :type call: string + :param call: Call name, e.g. 
``CreateStack`` + + :type params: dict + :param params: Dictionary of call parameters + + :type path: string + :param path: Server path + + :type method: string + :param method: HTTP method to use + + :rtype: dict + :return: Parsed JSON response data + """ + response = self.make_request(call, params, path, method) + body = response.read().decode('utf-8') + if response.status == 200: + body = json.loads(body) + return body + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body=body) + + def create_stack(self, stack_name, template_body=None, template_url=None, + parameters=None, notification_arns=None, disable_rollback=None, + timeout_in_minutes=None, capabilities=None, tags=None, + on_failure=None, stack_policy_body=None, stack_policy_url=None): + """ + Creates a stack as specified in the template. After the call + completes successfully, the stack creation starts. You can + check the status of the stack via the DescribeStacks API. + Currently, the limit for stacks is 20 stacks per account per + region. + + :type stack_name: string + :param stack_name: + The name associated with the stack. The name must be unique within your + AWS account. + + Must contain only alphanumeric characters (case sensitive) and start + with an alpha character. Maximum length of the name is 255 + characters. + + :type template_body: string + :param template_body: Structure containing the template body. (For more + information, go to `Template Anatomy`_ in the AWS CloudFormation + User Guide.) + Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are + passed, only `TemplateBody` is used. + + :type template_url: string + :param template_url: Location of file containing the template body. The + URL must point to a template (max size: 307,200 bytes) located in + an S3 bucket in the same region as the stack. For more information, + go to the `Template Anatomy`_ in the AWS CloudFormation User Guide. + Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are + passed, only `TemplateBody` is used. + + :type parameters: list + :param parameters: A list of key/value tuples that specify input + parameters for the stack. + + :type disable_rollback: boolean + :param disable_rollback: Set to `True` to disable rollback of the stack + if stack creation failed. You can specify either `DisableRollback` + or `OnFailure`, but not both. + Default: `False` + + :type timeout_in_minutes: integer + :param timeout_in_minutes: The amount of time that can pass before the + stack status becomes CREATE_FAILED; if `DisableRollback` is not set + or is set to `False`, the stack will be rolled back. + + :type notification_arns: list + :param notification_arns: The Simple Notification Service (SNS) topic + ARNs to publish stack related events. You can find your SNS topic + ARNs using the `SNS console`_ or your Command Line Interface (CLI). + + :type capabilities: list + :param capabilities: The list of capabilities that you want to allow in + the stack. If your template contains certain resources, you must + specify the CAPABILITY_IAM value for this parameter; otherwise, + this action returns an InsufficientCapabilities error. 
The
+            following resources require you to specify the capabilities
+            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
+            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
+            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
+            `AWS::IAM::UserToGroupAddition`_.
+
+        :type on_failure: string
+        :param on_failure: Determines what action will be taken if stack
+            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
+            DELETE. You can specify either `OnFailure` or `DisableRollback`,
+            but not both.
+            Default: `ROLLBACK`
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            If you pass `StackPolicyBody` and `StackPolicyURL`, only
+            `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack. If you pass
+            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
+            used.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+        """
+        params = self._build_create_or_update_params(stack_name, template_body,
+            template_url, parameters, disable_rollback, timeout_in_minutes,
+            notification_arns, capabilities, on_failure, stack_policy_body,
+            stack_policy_url, tags)
+        body = self._do_request('CreateStack', params, '/', 'POST')
+        return body['CreateStackResponse']['CreateStackResult']['StackId']
+
+    def update_stack(self, stack_name, template_body=None, template_url=None,
+            parameters=None, notification_arns=None, disable_rollback=False,
+            timeout_in_minutes=None, capabilities=None, tags=None,
+            use_previous_template=None,
+            stack_policy_during_update_body=None,
+            stack_policy_during_update_url=None,
+            stack_policy_body=None, stack_policy_url=None):
+        """
+        Updates a stack as specified in the template. After the call
+        completes successfully, the stack update starts. You can check
+        the status of the stack via the DescribeStacks action.
+
+        **Note:** You cannot update `AWS::S3::Bucket`_ resources, for
+        example, to add or modify tags.
+
+        To get a copy of the template for an existing stack, you can
+        use the GetTemplate action.
+
+        Tags that were associated with this stack during creation time
+        will still be associated with the stack after an `UpdateStack`
+        operation.
+
+        For more information about creating an update template,
+        updating a stack, and monitoring the progress of the update,
+        see `Updating a Stack`_.
+
+        :type stack_name: string
+        :param stack_name:
+            The name or stack ID of the stack to update.
+
+            Must contain only alphanumeric characters (case sensitive) and start
+            with an alpha character. Maximum length of the name is 255
+            characters.
+
+        :type template_body: string
+        :param template_body: Structure containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`. If both `TemplateBody` and
+            `TemplateUrl` are passed, only `TemplateBody` is used.
+
+        :type use_previous_template: boolean
+        :param use_previous_template: Set to `True` to use the previous
+            template instead of uploading a new one via `TemplateBody` or
+            `TemplateURL`.
+            Conditional: You must pass either `UsePreviousTemplate` or one of
+            `TemplateBody` or `TemplateUrl`.
+
+        :type parameters: list
+        :param parameters: A list of key/value tuples that specify input
+            parameters for the stack. A 3-tuple (key, value, bool) may be used to
+            specify the `UsePreviousValue` option.
+
+        :type notification_arns: list
+        :param notification_arns: The Simple Notification Service (SNS) topic
+            ARNs to publish stack related events. You can find your SNS topic
+            ARNs using the `SNS console`_ or your Command Line Interface (CLI).
+
+        :type disable_rollback: bool
+        :param disable_rollback: Indicates whether or not to rollback on
+            failure.
+
+        :type timeout_in_minutes: integer
+        :param timeout_in_minutes: The amount of time that can pass before the
+            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
+            or is set to `False`, the stack will be rolled back.
+
+        :type capabilities: list
+        :param capabilities: The list of capabilities you want to allow in
+            the stack. Currently, the only valid capability is
+            'CAPABILITY_IAM'.
+
+        :type tags: dict
+        :param tags: A set of user-defined `Tags` to associate with this stack,
+            represented by key/value pairs. Tags defined for the stack are
+            propagated to EC2 resources that are created as part of the stack.
+            A maximum number of 10 tags can be specified.
+
+        :type stack_policy_during_update_body: string
+        :param stack_policy_during_update_body: Structure containing the
+            temporary overriding stack policy body. If you pass
+            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
+            only `StackPolicyDuringUpdateBody` is used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update. If you do not specify a
+            stack policy, the current policy that is associated with the stack
+            will be used.
+
+        :type stack_policy_during_update_url: string
+        :param stack_policy_during_update_url: Location of a file containing
+            the temporary overriding stack policy. The URL must point to a
+            policy (max size: 16KB) located in an S3 bucket in the same region
+            as the stack. If you pass `StackPolicyDuringUpdateBody` and
+            `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
+            used.
+            If you want to update protected resources, specify a temporary
+            overriding stack policy during this update.
If you do not specify a + stack policy, the current policy that is associated with the stack + will be used. + + :rtype: string + :return: The unique Stack ID. + """ + params = self._build_create_or_update_params(stack_name, template_body, + template_url, parameters, disable_rollback, timeout_in_minutes, + notification_arns, capabilities, None, stack_policy_body, + stack_policy_url, tags, use_previous_template, + stack_policy_during_update_body, stack_policy_during_update_url) + body = self._do_request('UpdateStack', params, '/', 'POST') + return body['UpdateStackResponse']['UpdateStackResult']['StackId'] + + def delete_stack(self, stack_name_or_id): + """ + Deletes a specified stack. Once the call completes + successfully, stack deletion starts. Deleted stacks do not + show up in the DescribeStacks API if the deletion has been + completed successfully. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id} + return self._do_request('DeleteStack', params, '/', 'GET') + + def describe_stack_events(self, stack_name_or_id=None, next_token=None): + """ + Returns all stack related events for a specified stack. For + more information about a stack's event history, go to + `Stacks`_ in the AWS CloudFormation User Guide. + Events are returned, even if the stack never existed or has + been successfully deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + events, if there is one. + Default: There is no default value. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeStackEvents', params, [('member', + StackEvent)]) + + def describe_stack_resource(self, stack_name_or_id, logical_resource_id): + """ + Returns a description of the specified resource in the + specified stack. + + For deleted stacks, DescribeStackResource returns resource + information for up to 90 days after the stack has been + deleted. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, + 'LogicalResourceId': logical_resource_id} + return self._do_request('DescribeStackResource', params, '/', 'GET') + + def describe_stack_resources(self, stack_name_or_id=None, + logical_resource_id=None, + physical_resource_id=None): + """ + Returns AWS resource descriptions for running and deleted + stacks. If `StackName` is specified, all the associated + resources that are part of the stack are returned. If + `PhysicalResourceId` is specified, the associated resources of + the stack that the resource belongs to are returned. + Only the first 100 resources will be returned. If your stack + has more resources than this, you should use + `ListStackResources` instead. + For deleted stacks, `DescribeStackResources` returns resource + information for up to 90 days after the stack has been + deleted. 
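+
+        For example, a minimal sketch (the stack name below is
+        hypothetical)::
+
+            conn = CloudFormationConnection()
+            resources = conn.describe_stack_resources('my-stack')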
+ + You must specify either `StackName` or `PhysicalResourceId`, + but not both. In addition, you can specify `LogicalResourceId` + to filter the returned result. For more information about + resources, the `LogicalResourceId` and `PhysicalResourceId`, + go to the `AWS CloudFormation User Guide`_. + A `ValidationError` is returned if you specify both + `StackName` and `PhysicalResourceId` in the same request. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Required: Conditional. If you do not specify `StackName`, you must + specify `PhysicalResourceId`. + + Default: There is no default value. + + :type logical_resource_id: string + :param logical_resource_id: The logical name of the resource as + specified in the template. + Default: There is no default value. + + :type physical_resource_id: string + :param physical_resource_id: The name or unique identifier that + corresponds to a physical instance ID of a resource supported by + AWS CloudFormation. + For example, for an Amazon Elastic Compute Cloud (EC2) instance, + `PhysicalResourceId` corresponds to the `InstanceId`. You can pass + the EC2 `InstanceId` to `DescribeStackResources` to find which + stack the instance belongs to and what other resources are part of + the stack. + + Required: Conditional. If you do not specify `PhysicalResourceId`, you + must specify `StackName`. + + Default: There is no default value. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if logical_resource_id: + params['LogicalResourceId'] = logical_resource_id + if physical_resource_id: + params['PhysicalResourceId'] = physical_resource_id + return self.get_list('DescribeStackResources', params, + [('member', StackResource)]) + + def describe_stacks(self, stack_name_or_id=None, next_token=None): + """ + Returns the description for the specified stack; if no stack + name was specified, then it returns the description for all + the stacks created. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack. + Default: There is no default value. + + :type next_token: string + :param next_token: String that identifies the start of the next list of + stacks, if there is one. + + """ + params = {} + if stack_name_or_id: + params['StackName'] = stack_name_or_id + if next_token is not None: + params['NextToken'] = next_token + return self.get_list('DescribeStacks', params, [('member', Stack)]) + + def get_template(self, stack_name_or_id): + """ + Returns the template body for a specified stack. You can get + the template for running or deleted stacks. + + For deleted stacks, GetTemplate returns the template for up to + 90 days after the stack has been deleted. + If the template does not exist, a `ValidationError` is + returned. + + :type stack_name_or_id: string + :param stack_name_or_id: The name or the unique identifier associated + with the stack, which are not always interchangeable: + + + Running stacks: You can specify either the stack's name or its unique + stack ID. + + Deleted stacks: You must specify the unique stack ID. + + + Default: There is no default value. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id} + return self._do_request('GetTemplate', params, '/', 'GET') + + def list_stack_resources(self, stack_name_or_id, next_token=None): + """ + Returns descriptions of all resources of the specified stack. 
+
+        For deleted stacks, ListStackResources returns resource
+        information for up to 90 days after the stack has been
+        deleted.
+
+        :type stack_name_or_id: string
+        :param stack_name_or_id: The name or the unique identifier associated
+            with the stack, which are not always interchangeable:
+
+            + Running stacks: You can specify either the stack's name or its unique
+              stack ID.
+            + Deleted stacks: You must specify the unique stack ID.
+
+            Default: There is no default value.
+
+        :type next_token: string
+        :param next_token: String that identifies the start of the next list of
+            stack resource summaries, if there is one.
+            Default: There is no default value.
+
+        """
+        params = {'StackName': stack_name_or_id}
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('ListStackResources', params,
+                             [('member', StackResourceSummary)])
+
+    def list_stacks(self, stack_status_filters=None, next_token=None):
+        """
+        Returns the summary information for stacks whose status
+        matches the specified StackStatusFilter. Summary information
+        for stacks that have been deleted is kept for 90 days after
+        the stack is deleted. If no StackStatusFilter is specified,
+        summary information for all stacks is returned (including
+        existing stacks and stacks that have been deleted).
+
+        :type next_token: string
+        :param next_token: String that identifies the start of the next list of
+            stacks, if there is one.
+            Default: There is no default value.
+
+        :type stack_status_filters: list
+        :param stack_status_filters: Stack status to use as a filter. Specify
+            one or more stack status codes to list only stacks with the
+            specified status codes. For a complete list of stack status codes,
+            see the `StackStatus` parameter of the Stack data type.
+
+        """
+        params = {}
+        if next_token:
+            params['NextToken'] = next_token
+        if stack_status_filters and len(stack_status_filters) > 0:
+            self.build_list_params(params, stack_status_filters,
+                                   "StackStatusFilter.member")
+
+        return self.get_list('ListStacks', params,
+                             [('member', StackSummary)])
+
+    def validate_template(self, template_body=None, template_url=None):
+        """
+        Validates a specified template.
+
+        :type template_body: string
+        :param template_body: String containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+            passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template (max size: 307,200 bytes) located in
+            an S3 bucket in the same region as the stack. For more information,
+            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+            passed, only `TemplateBody` is used.
+
+        """
+        params = {}
+        if template_body:
+            params['TemplateBody'] = template_body
+        if template_url:
+            params['TemplateURL'] = template_url
+        if template_body and template_url:
+            boto.log.warning("If both TemplateBody and TemplateURL are"
+                             " specified, only TemplateBody will be honored by the API")
+        return self.get_object('ValidateTemplate', params, Template,
+                               verb="POST")
+
+    def cancel_update_stack(self, stack_name_or_id=None):
+        """
+        Cancels an update on the specified stack. If the call
+        completes successfully, the stack will roll back the update
+        and revert to the previous stack configuration.
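+
+        For example (a sketch only; 'my-stack' is a hypothetical stack
+        name)::
+
+            conn.cancel_update_stack('my-stack')
+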
+        Only stacks that are in the UPDATE_IN_PROGRESS state can be
+        canceled.
+
+        :type stack_name_or_id: string
+        :param stack_name_or_id: The name or the unique identifier associated with
+            the stack.
+
+        """
+        params = {}
+        if stack_name_or_id:
+            params['StackName'] = stack_name_or_id
+        return self.get_status('CancelUpdateStack', params)
+
+    def estimate_template_cost(self, template_body=None, template_url=None,
+                               parameters=None):
+        """
+        Returns the estimated monthly cost of a template. The return
+        value is an AWS Simple Monthly Calculator URL with a query
+        string that describes the resources required to run the
+        template.
+
+        :type template_body: string
+        :param template_body: Structure containing the template body. (For more
+            information, go to `Template Anatomy`_ in the AWS CloudFormation
+            User Guide.)
+            Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
+            passed, only `TemplateBody` is used.
+
+        :type template_url: string
+        :param template_url: Location of file containing the template body. The
+            URL must point to a template located in an S3 bucket in the same
+            region as the stack. For more information, go to `Template
+            Anatomy`_ in the AWS CloudFormation User Guide.
+            Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
+            passed, only `TemplateBody` is used.
+
+        :type parameters: list
+        :param parameters: A list of key/value tuples that specify input
+            parameters for the template.
+
+        :rtype: string
+        :returns: URL to pre-filled cost calculator
+        """
+        params = {'ContentType': "JSON"}
+        if template_body is not None:
+            params['TemplateBody'] = template_body
+        if template_url is not None:
+            params['TemplateURL'] = template_url
+        if parameters and len(parameters) > 0:
+            for i, (key, value) in enumerate(parameters):
+                params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
+                params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
+
+        response = self._do_request('EstimateTemplateCost', params, '/', 'POST')
+        return response['EstimateTemplateCostResponse']\
+            ['EstimateTemplateCostResult']\
+            ['Url']
+
+    def get_stack_policy(self, stack_name_or_id):
+        """
+        Returns the stack policy for a specified stack. If a stack
+        doesn't have a policy, a null value is returned.
+
+        :type stack_name_or_id: string
+        :param stack_name_or_id: The name or stack ID that is associated with
+            the stack whose policy you want to get.
+
+        :rtype: string
+        :return: The policy JSON document
+        """
+        params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
+        response = self._do_request('GetStackPolicy', params, '/', 'POST')
+        return response['GetStackPolicyResponse']\
+            ['GetStackPolicyResult']\
+            ['StackPolicyBody']
+
+    def set_stack_policy(self, stack_name_or_id, stack_policy_body=None,
+                         stack_policy_url=None):
+        """
+        Sets a stack policy for a specified stack.
+
+        :type stack_name_or_id: string
+        :param stack_name_or_id: The name or stack ID that you want to
+            associate a policy with.
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
+            passed, only `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack.
You must pass + `StackPolicyBody` or `StackPolicyURL`. If both are passed, only + `StackPolicyBody` is used. + + """ + params = {'ContentType': "JSON", 'StackName': stack_name_or_id, } + if stack_policy_body is not None: + params['StackPolicyBody'] = stack_policy_body + if stack_policy_url is not None: + params['StackPolicyURL'] = stack_policy_url + + response = self._do_request('SetStackPolicy', params, '/', 'POST') + return response['SetStackPolicyResponse'] diff --git a/ext/boto/cloudformation/stack.py b/ext/boto/cloudformation/stack.py new file mode 100644 index 0000000000..c00fefcf17 --- /dev/null +++ b/ext/boto/cloudformation/stack.py @@ -0,0 +1,423 @@ +from datetime import datetime + +from boto.resultset import ResultSet + + +class Stack(object): + def __init__(self, connection=None): + self.connection = connection + self.creation_time = None + self.description = None + self.disable_rollback = None + self.notification_arns = [] + self.outputs = [] + self.parameters = [] + self.capabilities = [] + self.tags = [] + self.stack_id = None + self.stack_status = None + self.stack_status_reason = None + self.stack_name = None + self.timeout_in_minutes = None + + @property + def stack_name_reason(self): + return self.stack_status_reason + + @stack_name_reason.setter + def stack_name_reason(self, value): + self.stack_status_reason = value + + def startElement(self, name, attrs, connection): + if name == "Parameters": + self.parameters = ResultSet([('member', Parameter)]) + return self.parameters + elif name == "Outputs": + self.outputs = ResultSet([('member', Output)]) + return self.outputs + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities + elif name == "Tags": + self.tags = Tag() + return self.tags + elif name == 'NotificationARNs': + self.notification_arns = ResultSet([('member', NotificationARN)]) + return self.notification_arns + else: + return None + + def endElement(self, name, value, connection): + if name == 'CreationTime': + try: + self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + elif name == "Description": + self.description = value + elif name == "DisableRollback": + if str(value).lower() == 'true': + self.disable_rollback = True + else: + self.disable_rollback = False + elif name == 'StackId': + self.stack_id = value + elif name == 'StackName': + self.stack_name = value + elif name == 'StackStatus': + self.stack_status = value + elif name == "StackStatusReason": + self.stack_status_reason = value + elif name == "TimeoutInMinutes": + self.timeout_in_minutes = int(value) + elif name == "member": + pass + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_stack(stack_name_or_id=self.stack_id) + + def describe_events(self, next_token=None): + return self.connection.describe_stack_events( + stack_name_or_id=self.stack_id, + next_token=next_token + ) + + def describe_resource(self, logical_resource_id): + return self.connection.describe_stack_resource( + stack_name_or_id=self.stack_id, + logical_resource_id=logical_resource_id + ) + + def describe_resources(self, logical_resource_id=None, + physical_resource_id=None): + return self.connection.describe_stack_resources( + stack_name_or_id=self.stack_id, + logical_resource_id=logical_resource_id, + physical_resource_id=physical_resource_id + ) + + def list_resources(self, next_token=None): + return 
self.connection.list_stack_resources(
+            stack_name_or_id=self.stack_id,
+            next_token=next_token
+        )
+
+    def update(self):
+        rs = self.connection.describe_stacks(self.stack_id)
+        if len(rs) == 1 and rs[0].stack_id == self.stack_id:
+            self.__dict__.update(rs[0].__dict__)
+        else:
+            raise ValueError("%s is not a valid Stack ID or Name" %
+                             self.stack_id)
+
+    def get_template(self):
+        return self.connection.get_template(stack_name_or_id=self.stack_id)
+
+    def get_policy(self):
+        """
+        Returns the stack policy for this stack. If it has no policy,
+        then a null value is returned.
+        """
+        return self.connection.get_stack_policy(self.stack_id)
+
+    def set_policy(self, stack_policy_body=None, stack_policy_url=None):
+        """
+        Sets a stack policy for this stack.
+
+        :type stack_policy_body: string
+        :param stack_policy_body: Structure containing the stack policy body.
+            (For more information, go to `Prevent Updates to Stack Resources`_
+            in the AWS CloudFormation User Guide.)
+            You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
+            passed, only `StackPolicyBody` is used.
+
+        :type stack_policy_url: string
+        :param stack_policy_url: Location of a file containing the stack
+            policy. The URL must point to a policy (max size: 16KB) located in
+            an S3 bucket in the same region as the stack. You must pass
+            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
+            `StackPolicyBody` is used.
+        """
+        return self.connection.set_stack_policy(self.stack_id,
+                                                stack_policy_body=stack_policy_body,
+                                                stack_policy_url=stack_policy_url)
+
+
+class StackSummary(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.stack_id = None
+        self.stack_status = None
+        self.stack_name = None
+        self.creation_time = None
+        self.deletion_time = None
+        self.template_description = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'StackId':
+            self.stack_id = value
+        elif name == 'StackStatus':
+            self.stack_status = value
+        elif name == 'StackName':
+            self.stack_name = value
+        elif name == 'CreationTime':
+            try:
+                self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+            except ValueError:
+                self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+        elif name == "DeletionTime":
+            try:
+                self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+            except ValueError:
+                self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+        elif name == 'TemplateDescription':
+            self.template_description = value
+        elif name == "member":
+            pass
+        else:
+            setattr(self, name, value)
+
+
+class Parameter(object):
+    def __init__(self, connection=None):
+        self.connection = None
+        self.key = None
+        self.value = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == "ParameterKey":
+            self.key = value
+        elif name == "ParameterValue":
+            self.value = value
+        else:
+            setattr(self, name, value)
+
+    def __repr__(self):
+        return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
+
+
+class Output(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.description = None
+        self.key = None
+        self.value = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == "Description":
+            self.description = value
+        elif name == "OutputKey":
+            self.key = value
+        elif name == "OutputValue":
+            self.value = value
+ else: + setattr(self, name, value) + + def __repr__(self): + return "Output:\"%s\"=\"%s\"" % (self.key, self.value) + + +class Capability(object): + def __init__(self, connection=None): + self.connection = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + self.value = value + + def __repr__(self): + return "Capability:\"%s\"" % (self.value) + + +class Tag(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + self._current_key = None + self._current_value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "Key": + self._current_key = value + elif name == "Value": + self._current_value = value + else: + setattr(self, name, value) + + if self._current_key and self._current_value: + self[self._current_key] = self._current_value + self._current_key = None + self._current_value = None + + +class NotificationARN(object): + def __init__(self, connection=None): + self.connection = None + self.value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + self.value = value + + def __repr__(self): + return "NotificationARN:\"%s\"" % (self.value) + + +class StackResource(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + self.stack_id = None + self.stack_name = None + self.timestamp = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "Description": + self.description = value + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == "ResourceStatusReason": + self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + elif name == "StackId": + self.stack_id = value + elif name == "StackName": + self.stack_name = value + elif name == "Timestamp": + try: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + else: + setattr(self, name, value) + + def __repr__(self): + return "StackResource:%s (%s)" % (self.logical_resource_id, + self.resource_type) + + +class StackResourceSummary(object): + def __init__(self, connection=None): + self.connection = connection + self.last_updated_time = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "LastUpdatedTime": + try: + self.last_updated_time = datetime.strptime( + value, + '%Y-%m-%dT%H:%M:%SZ' + ) + except ValueError: + self.last_updated_time = datetime.strptime( + value, + '%Y-%m-%dT%H:%M:%S.%fZ' + ) + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == 
"ResourceStatusReason": + self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + else: + setattr(self, name, value) + + def __repr__(self): + return "StackResourceSummary:%s (%s)" % (self.logical_resource_id, + self.resource_type) + + +class StackEvent(object): + valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE", + "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE") + def __init__(self, connection=None): + self.connection = connection + self.event_id = None + self.logical_resource_id = None + self.physical_resource_id = None + self.resource_properties = None + self.resource_status = None + self.resource_status_reason = None + self.resource_type = None + self.stack_id = None + self.stack_name = None + self.timestamp = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "EventId": + self.event_id = value + elif name == "LogicalResourceId": + self.logical_resource_id = value + elif name == "PhysicalResourceId": + self.physical_resource_id = value + elif name == "ResourceProperties": + self.resource_properties = value + elif name == "ResourceStatus": + self.resource_status = value + elif name == "ResourceStatusReason": + self.resource_status_reason = value + elif name == "ResourceType": + self.resource_type = value + elif name == "StackId": + self.stack_id = value + elif name == "StackName": + self.stack_name = value + elif name == "Timestamp": + try: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + except ValueError: + self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + else: + setattr(self, name, value) + + def __repr__(self): + return "StackEvent %s %s %s" % (self.resource_type, + self.logical_resource_id, self.resource_status) diff --git a/ext/boto/cloudformation/template.py b/ext/boto/cloudformation/template.py new file mode 100644 index 0000000000..bab2148630 --- /dev/null +++ b/ext/boto/cloudformation/template.py @@ -0,0 +1,51 @@ +from boto.resultset import ResultSet +from boto.cloudformation.stack import Capability + +class Template(object): + def __init__(self, connection=None): + self.connection = connection + self.description = None + self.template_parameters = None + self.capabilities_reason = None + self.capabilities = None + + def startElement(self, name, attrs, connection): + if name == "Parameters": + self.template_parameters = ResultSet([('member', TemplateParameter)]) + return self.template_parameters + elif name == "Capabilities": + self.capabilities = ResultSet([('member', Capability)]) + return self.capabilities + else: + return None + + def endElement(self, name, value, connection): + if name == "Description": + self.description = value + elif name == "CapabilitiesReason": + self.capabilities_reason = value + else: + setattr(self, name, value) + +class TemplateParameter(object): + def __init__(self, parent): + self.parent = parent + self.default_value = None + self.description = None + self.no_echo = None + self.parameter_key = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == "DefaultValue": + self.default_value = value + elif name == "Description": + self.description = value + elif name == "NoEcho": + self.no_echo = bool(value) + elif name == "ParameterKey": + self.parameter_key = value + else: + setattr(self, name, value) diff --git a/ext/boto/cloudfront/__init__.py 
b/ext/boto/cloudfront/__init__.py new file mode 100644 index 0000000000..1afefebbf3 --- /dev/null +++ b/ext/boto/cloudfront/__init__.py @@ -0,0 +1,326 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import xml.sax +import time +import boto +from boto.connection import AWSAuthConnection +from boto import handler +from boto.cloudfront.distribution import Distribution, DistributionSummary, DistributionConfig +from boto.cloudfront.distribution import StreamingDistribution, StreamingDistributionSummary, StreamingDistributionConfig +from boto.cloudfront.identity import OriginAccessIdentity +from boto.cloudfront.identity import OriginAccessIdentitySummary +from boto.cloudfront.identity import OriginAccessIdentityConfig +from boto.cloudfront.invalidation import InvalidationBatch, InvalidationSummary, InvalidationListResultSet +from boto.resultset import ResultSet +from boto.cloudfront.exception import CloudFrontServerError + + +class CloudFrontConnection(AWSAuthConnection): + + DefaultHost = 'cloudfront.amazonaws.com' + Version = '2010-11-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + port=None, proxy=None, proxy_port=None, + host=DefaultHost, debug=0, security_token=None, + validate_certs=True, profile_name=None, https_connection_factory=None): + super(CloudFrontConnection, self).__init__(host, + aws_access_key_id, aws_secret_access_key, + True, port, proxy, proxy_port, debug=debug, + security_token=security_token, + validate_certs=validate_certs, + https_connection_factory=https_connection_factory, + profile_name=profile_name) + + def get_etag(self, response): + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + return response_headers[key] + return None + + def _required_auth_capability(self): + return ['cloudfront'] + + # Generics + + def _get_all_objects(self, resource, tags, result_set_class=None, + result_set_kwargs=None): + if not tags: + tags = [('DistributionSummary', DistributionSummary)] + response = self.make_request('GET', '/%s/%s' % (self.Version, + resource)) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + rs_class = result_set_class or ResultSet + rs_kwargs = result_set_kwargs or dict() + rs = rs_class(tags, **rs_kwargs) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def _get_info(self, id, resource, 
dist_class): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = dist_class(connection=self) + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + d.etag = response_headers[key] + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _get_config(self, id, resource, config_class): + uri = '/%s/%s/%s/config' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = config_class(connection=self) + d.etag = self.get_etag(response) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _set_config(self, distribution_id, etag, config): + if isinstance(config, StreamingDistributionConfig): + resource = 'streaming-distribution' + else: + resource = 'distribution' + uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id) + headers = {'If-Match': etag, 'Content-Type': 'text/xml'} + response = self.make_request('PUT', uri, headers, config.to_xml()) + body = response.read() + boto.log.debug(body) + if response.status != 200: + raise CloudFrontServerError(response.status, response.reason, body) + return self.get_etag(response) + + def _create_object(self, config, resource, dist_class): + response = self.make_request('POST', '/%s/%s' % (self.Version, + resource), + {'Content-Type': 'text/xml'}, + data=config.to_xml()) + body = response.read() + boto.log.debug(body) + if response.status == 201: + d = dist_class(connection=self) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + d.etag = self.get_etag(response) + return d + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def _delete_object(self, id, etag, resource): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('DELETE', uri, {'If-Match': etag}) + body = response.read() + boto.log.debug(body) + if response.status != 204: + raise CloudFrontServerError(response.status, response.reason, body) + + # Distributions + + def get_all_distributions(self): + tags = [('DistributionSummary', DistributionSummary)] + return self._get_all_objects('distribution', tags) + + def get_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'distribution', Distribution) + + def get_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'distribution', + DistributionConfig) + + def set_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_distribution(self, origin, enabled, caller_reference='', + cnames=None, comment='', trusted_signers=None): + config = DistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment, + trusted_signers=trusted_signers) + return self._create_object(config, 'distribution', Distribution) + + def delete_distribution(self, distribution_id, etag): + return self._delete_object(distribution_id, etag, 'distribution') + + # Streaming Distributions + + def get_all_streaming_distributions(self): + tags = [('StreamingDistributionSummary', StreamingDistributionSummary)] + return 
self._get_all_objects('streaming-distribution', tags) + + def get_streaming_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'streaming-distribution', + StreamingDistribution) + + def get_streaming_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'streaming-distribution', + StreamingDistributionConfig) + + def set_streaming_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_streaming_distribution(self, origin, enabled, + caller_reference='', + cnames=None, comment='', + trusted_signers=None): + config = StreamingDistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment, + trusted_signers=trusted_signers) + return self._create_object(config, 'streaming-distribution', + StreamingDistribution) + + def delete_streaming_distribution(self, distribution_id, etag): + return self._delete_object(distribution_id, etag, + 'streaming-distribution') + + # Origin Access Identity + + def get_all_origin_access_identity(self): + tags = [('CloudFrontOriginAccessIdentitySummary', + OriginAccessIdentitySummary)] + return self._get_all_objects('origin-access-identity/cloudfront', tags) + + def get_origin_access_identity_info(self, access_id): + return self._get_info(access_id, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def get_origin_access_identity_config(self, access_id): + return self._get_config(access_id, + 'origin-access-identity/cloudfront', + OriginAccessIdentityConfig) + + def set_origin_access_identity_config(self, access_id, + etag, config): + return self._set_config(access_id, etag, config) + + def create_origin_access_identity(self, caller_reference='', comment=''): + config = OriginAccessIdentityConfig(caller_reference=caller_reference, + comment=comment) + return self._create_object(config, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def delete_origin_access_identity(self, access_id, etag): + return self._delete_object(access_id, etag, + 'origin-access-identity/cloudfront') + + # Object Invalidation + + def create_invalidation_request(self, distribution_id, paths, + caller_reference=None): + """Creates a new invalidation request + :see: http://goo.gl/8vECq + """ + # We allow you to pass in either an array or + # an InvalidationBatch object + if not isinstance(paths, InvalidationBatch): + paths = InvalidationBatch(paths) + paths.connection = self + uri = '/%s/distribution/%s/invalidation' % (self.Version, + distribution_id) + response = self.make_request('POST', uri, + {'Content-Type': 'text/xml'}, + data=paths.to_xml()) + body = response.read() + if response.status == 201: + h = handler.XmlHandler(paths, self) + xml.sax.parseString(body, h) + return paths + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def invalidation_request_status(self, distribution_id, + request_id, caller_reference=None): + uri = '/%s/distribution/%s/invalidation/%s' % (self.Version, + distribution_id, + request_id) + response = self.make_request('GET', uri, {'Content-Type': 'text/xml'}) + body = response.read() + if response.status == 200: + paths = InvalidationBatch([]) + h = handler.XmlHandler(paths, self) + xml.sax.parseString(body, h) + return paths + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def get_invalidation_requests(self, distribution_id, marker=None, + max_items=None): + """ + Get all 
invalidation requests for a given CloudFront distribution. + This returns an instance of an InvalidationListResultSet that + automatically handles all of the result paging, etc. from CF - you just + need to keep iterating until there are no more results. + + :type distribution_id: string + :param distribution_id: The id of the CloudFront distribution + + :type marker: string + :param marker: Use this only when paginating results and only in + follow-up request after you've received a response where + the results are truncated. Set this to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results and only in a + follow-up request to indicate the maximum number of + invalidation requests you want in the response. You + will need to pass the next_marker property from the + previous InvalidationListResultSet response in the + follow-up request in order to get the next 'page' of + results. + + :rtype: :class:`boto.cloudfront.invalidation.InvalidationListResultSet` + :returns: An InvalidationListResultSet iterator that lists invalidation + requests for a given CloudFront distribution. Automatically + handles paging the results. + """ + uri = 'distribution/%s/invalidation' % distribution_id + params = dict() + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + if params: + uri += '?%s=%s' % params.popitem() + for k, v in params.items(): + uri += '&%s=%s' % (k, v) + tags=[('InvalidationSummary', InvalidationSummary)] + rs_class = InvalidationListResultSet + rs_kwargs = dict(connection=self, distribution_id=distribution_id, + max_items=max_items, marker=marker) + return self._get_all_objects(uri, tags, result_set_class=rs_class, + result_set_kwargs=rs_kwargs) diff --git a/ext/boto/cloudfront/distribution.py b/ext/boto/cloudfront/distribution.py new file mode 100644 index 0000000000..192c7c3927 --- /dev/null +++ b/ext/boto/cloudfront/distribution.py @@ -0,0 +1,757 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
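+#
+# A minimal usage sketch (illustration only; it assumes valid boto
+# credentials and a hypothetical bucket name):
+#
+#     from boto.cloudfront import CloudFrontConnection
+#     from boto.cloudfront.origin import S3Origin
+#
+#     conn = CloudFrontConnection()
+#     origin = S3Origin('example-bucket.s3.amazonaws.com')
+#     dist = conn.create_distribution(origin=origin, enabled=True,
+#                                     comment='example distribution')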
+
+import uuid
+import base64
+import time
+from boto.compat import six, json
+from boto.cloudfront.identity import OriginAccessIdentity
+from boto.cloudfront.object import Object, StreamingObject
+from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
+from boto.cloudfront.logging import LoggingInfo
+from boto.cloudfront.origin import S3Origin, CustomOrigin
+from boto.s3.acl import ACL
+
+class DistributionConfig(object):
+
+    def __init__(self, connection=None, origin=None, enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, default_root_object=None,
+                 logging=None):
+        """
+        :param origin: Origin information to associate with the
+                       distribution. If your distribution will use
+                       an Amazon S3 origin, then this should be an
+                       S3Origin object. If your distribution will use
+                       a custom origin (non Amazon S3), then this
+                       should be a CustomOrigin object.
+        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
+                      :class:`boto.cloudfront.origin.CustomOrigin`
+
+        :param enabled: Whether the distribution is enabled to accept
+                        end user requests for content.
+        :type enabled: bool
+
+        :param caller_reference: A unique number that ensures the
+                                 request can't be replayed. If no
+                                 caller_reference is provided, boto
+                                 will generate a type 4 UUID for use
+                                 as the caller reference.
+        :type caller_reference: str
+
+        :param cnames: A CNAME alias you want to associate with this
+                       distribution. You can have up to 10 CNAME aliases
+                       per distribution.
+        :type cnames: list of str
+
+        :param comment: Any comments you want to include about the
+                        distribution.
+        :type comment: str
+
+        :param trusted_signers: Specifies any AWS accounts you want to
+                                permit to create signed URLs for private
+                                content. If you want the distribution to
+                                use signed URLs, this should contain a
+                                TrustedSigners object; if you want the
+                                distribution to use basic URLs, leave
+                                this None.
+        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`
+
+        :param default_root_object: Designates a default root object.
+                                    Only include a DefaultRootObject value
+                                    if you are going to assign a default
+                                    root object for the distribution.
+        :type default_root_object: str
+
+        :param logging: Controls whether access logs are written for the
+                        distribution. If you want to turn on access logs,
+                        this should contain a LoggingInfo object; otherwise
+                        it should contain None.
+        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
+
+        """
+        self.connection = connection
+        self.origin = origin
+        self.enabled = enabled
+        if caller_reference:
+            self.caller_reference = caller_reference
+        else:
+            self.caller_reference = str(uuid.uuid4())
+        self.cnames = []
+        if cnames:
+            self.cnames = cnames
+        self.comment = comment
+        self.trusted_signers = trusted_signers
+        self.logging = logging
+        self.default_root_object = default_root_object
+
+    def __repr__(self):
+        return "DistributionConfig:%s" % self.origin
+
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
+        if self.origin:
+            s += self.origin.to_xml()
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        for cname in self.cnames:
+            s += '  <CNAME>%s</CNAME>\n' % cname
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '  <Enabled>'
+        if self.enabled:
+            s += 'true'
+        else:
+            s += 'false'
+        s += '</Enabled>\n'
+        if self.trusted_signers:
+            s += '<TrustedSigners>\n'
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    s += '  <Self/>\n'
+                else:
+                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
+            s += '</TrustedSigners>\n'
+        if self.logging:
+            s += '<Logging>\n'
+            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
+            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
+            s += '</Logging>\n'
+        if self.default_root_object:
+            dro = self.default_root_object
+            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
+        s += '</DistributionConfig>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TrustedSigners':
+            self.trusted_signers = TrustedSigners()
+            return self.trusted_signers
+        elif name == 'Logging':
+            self.logging = LoggingInfo()
+            return self.logging
+        elif name == 'S3Origin':
+            self.origin = S3Origin()
+            return self.origin
+        elif name == 'CustomOrigin':
+            self.origin = CustomOrigin()
+            return self.origin
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'CNAME':
+            self.cnames.append(value)
+        elif name == 'Comment':
+            self.comment = value
+        elif name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+        elif name == 'CallerReference':
+            self.caller_reference = value
+        elif name == 'DefaultRootObject':
+            self.default_root_object = value
+        else:
+            setattr(self, name, value)
+
+class StreamingDistributionConfig(DistributionConfig):
+
+    def __init__(self, connection=None, origin='', enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, logging=None):
+        super(StreamingDistributionConfig, self).__init__(connection=connection,
+                                                          origin=origin,
+                                                          enabled=enabled,
+                                                          caller_reference=caller_reference,
+                                                          cnames=cnames,
+                                                          comment=comment,
+                                                          trusted_signers=trusted_signers,
+                                                          logging=logging)
+
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
+        if self.origin:
+            s += self.origin.to_xml()
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        for cname in self.cnames:
+            s += '  <CNAME>%s</CNAME>\n' % cname
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '  <Enabled>'
+        if self.enabled:
+            s += 'true'
+        else:
+            s += 'false'
+        s += '</Enabled>\n'
+        if self.trusted_signers:
+            s += '<TrustedSigners>\n'
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    s += '  <Self/>\n'
+                else:
+                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
+            s += '</TrustedSigners>\n'
+        if self.logging:
+            s += '<Logging>\n'
+            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
+            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
+            s += '</Logging>\n'
+        s += '</StreamingDistributionConfig>\n'
+        return s
+
+class DistributionSummary(object):
+
+    def __init__(self, connection=None, domain_name='', id='',
+                 last_modified_time=None, status='', origin=None,
+                 cname='', comment='', enabled=False):
+        self.connection = connection
+        self.domain_name = domain_name
+        self.id = id
+        self.last_modified_time = last_modified_time
+        self.status = status
+        self.origin = origin
+        self.enabled = enabled
+        self.cnames = []
+        if cname:
+            self.cnames.append(cname)
+        self.comment = comment
+        self.trusted_signers = None
+        self.etag = None
+        self.streaming = False
+
+    def __repr__(self):
+        return "DistributionSummary:%s" % self.domain_name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TrustedSigners':
+            self.trusted_signers = TrustedSigners()
+            return self.trusted_signers
+        elif name == 'S3Origin':
+            self.origin = S3Origin()
+            return self.origin
+        elif name == 'CustomOrigin':
+            self.origin = CustomOrigin()
+            return self.origin
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'Status':
+            self.status = value
+        elif name == 'LastModifiedTime':
+            self.last_modified_time = value
+        elif name == 'DomainName':
+            self.domain_name = value
+        elif name == 'Origin':
+            self.origin = value
+        elif name == 'CNAME':
+            self.cnames.append(value)
+        elif name == 'Comment':
+            self.comment = value
+        elif name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+        elif name == 'StreamingDistributionSummary':
+            self.streaming = True
+        else:
+            setattr(self, name, value)
+
+    def get_distribution(self):
+        return self.connection.get_distribution_info(self.id)
+
+class StreamingDistributionSummary(DistributionSummary):
+
+    def get_distribution(self):
+        return self.connection.get_streaming_distribution_info(self.id)
+
+class Distribution(object):
+
+    def __init__(self, connection=None, config=None, domain_name='',
+                 id='', last_modified_time=None, status=''):
+        self.connection = connection
+        self.config = config
+        self.domain_name = domain_name
+        self.id = id
+        self.last_modified_time = last_modified_time
+        self.status = status
+        self.in_progress_invalidation_batches = 0
+        self.active_signers = None
+        self.etag = None
+        self._bucket = None
+        self._object_class = Object
+
+    def __repr__(self):
+        return "Distribution:%s" % self.domain_name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'DistributionConfig':
+            self.config = DistributionConfig()
+            return self.config
+        elif name == 'ActiveTrustedSigners':
+            self.active_signers = ActiveTrustedSigners()
+            return self.active_signers
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'LastModifiedTime':
+            self.last_modified_time = value
+        elif name == 'Status':
+            self.status = value
+        elif name == 'InProgressInvalidationBatches':
+            self.in_progress_invalidation_batches = int(value)
+        elif name == 'DomainName':
+            self.domain_name = value
+        else:
+            setattr(self, name, value)
+
+    def update(self, enabled=None, cnames=None, comment=None):
+        """
+        Update the configuration of the Distribution. The only values
+        of the DistributionConfig that can be directly updated are:
+
+        * CNAMES
+        * Comment
+        * Whether the Distribution is enabled or not
+
+        Any changes to the ``trusted_signers`` or ``origin`` properties of
+        this distribution's current config object will also be included in
+        the update. Therefore, to set the origin access identity for this
+        distribution, set ``Distribution.config.origin.origin_access_identity``
+        before calling this update method.
+
+        :type enabled: bool
+        :param enabled: Whether the Distribution is active or not.
+
+        :type cnames: list of str
+        :param cnames: The DNS CNAMEs associated with this
+                       Distribution. Maximum of 10 values.
+
+        :type comment: str or unicode
+        :param comment: The comment associated with the Distribution.
+ + """ + new_config = DistributionConfig(self.connection, self.config.origin, + self.config.enabled, self.config.caller_reference, + self.config.cnames, self.config.comment, + self.config.trusted_signers, + self.config.default_root_object) + if enabled is not None: + new_config.enabled = enabled + if cnames is not None: + new_config.cnames = cnames + if comment is not None: + new_config.comment = comment + self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) + self.config = new_config + self._object_class = Object + + def enable(self): + """ + Activate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=True) + + def disable(self): + """ + Deactivate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=False) + + def delete(self): + """ + Delete this CloudFront Distribution. The content + associated with the Distribution is not deleted from + the underlying Origin bucket in S3. + """ + self.connection.delete_distribution(self.id, self.etag) + + def _get_bucket(self): + if isinstance(self.config.origin, S3Origin): + if not self._bucket: + bucket_dns_name = self.config.origin.dns_name + bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '') + from boto.s3.connection import S3Connection + s3 = S3Connection(self.connection.aws_access_key_id, + self.connection.aws_secret_access_key, + proxy=self.connection.proxy, + proxy_port=self.connection.proxy_port, + proxy_user=self.connection.proxy_user, + proxy_pass=self.connection.proxy_pass) + self._bucket = s3.get_bucket(bucket_name) + self._bucket.distribution = self + self._bucket.set_key_class(self._object_class) + return self._bucket + else: + raise NotImplementedError('Unable to get_objects on CustomOrigin') + + def get_objects(self): + """ + Return a list of all content objects in this distribution. + + :rtype: list of :class:`boto.cloudfront.object.Object` + :return: The content objects + """ + bucket = self._get_bucket() + objs = [] + for key in bucket: + objs.append(key) + return objs + + def set_permissions(self, object, replace=False): + """ + Sets the S3 ACL grants for the given object to the appropriate + value based on the type of Distribution. If the Distribution + is serving private content the ACL will be set to include the + Origin Access Identity associated with the Distribution. If + the Distribution is serving public content the content will + be set up with "public-read". + + :type object: :class:`boto.cloudfront.object.Object` + :param enabled: The Object whose ACL is being set + + :type replace: bool + :param replace: If False, the Origin Access Identity will be + appended to the existing ACL for the object. + If True, the ACL for the object will be + completely replaced with one that grants + READ permission to the Origin Access Identity. + + """ + if isinstance(self.config.origin, S3Origin): + if self.config.origin.origin_access_identity: + id = self.config.origin.origin_access_identity.split('/')[-1] + oai = self.connection.get_origin_access_identity_info(id) + policy = object.get_acl() + if replace: + policy.acl = ACL() + policy.acl.add_user_grant('READ', oai.s3_user_id) + object.set_acl(policy) + else: + object.set_canned_acl('public-read') + + def set_permissions_all(self, replace=False): + """ + Sets the S3 ACL grants for all objects in the Distribution + to the appropriate value based on the type of Distribution. 
+
+        :type replace: bool
+        :param replace: If False, the Origin Access Identity will be
+                        appended to the existing ACL for the object.
+                        If True, the ACL for the object will be
+                        completely replaced with one that grants
+                        READ permission to the Origin Access Identity.
+
+        """
+        bucket = self._get_bucket()
+        for key in bucket:
+            self.set_permissions(key, replace)
+
+    def add_object(self, name, content, headers=None, replace=True):
+        """
+        Adds a new content object to the Distribution. The content
+        for the object will be copied to a new Key in the S3 Bucket
+        and the permissions will be set appropriately for the type
+        of Distribution.
+
+        :type name: str or unicode
+        :param name: The name or key of the new object.
+
+        :type content: file-like object
+        :param content: A file-like object that contains the content
+                        for the new object.
+
+        :type headers: dict
+        :param headers: A dictionary containing additional headers
+                        you would like associated with the new
+                        object in S3.
+
+        :rtype: :class:`boto.cloudfront.object.Object`
+        :return: The newly created object.
+        """
+        if self.config.origin.origin_access_identity:
+            policy = 'private'
+        else:
+            policy = 'public-read'
+        bucket = self._get_bucket()
+        object = bucket.new_key(name)
+        object.set_contents_from_file(content, headers=headers, policy=policy)
+        if self.config.origin.origin_access_identity:
+            self.set_permissions(object, replace)
+        return object
+
+    def create_signed_url(self, url, keypair_id,
+                          expire_time=None, valid_after_time=None,
+                          ip_address=None, policy_url=None,
+                          private_key_file=None, private_key_string=None):
+        """
+        Creates a signed CloudFront URL that is only valid within the specified
+        parameters.
+
+        :type url: str
+        :param url: The URL of the protected object.
+
+        :type keypair_id: str
+        :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
+            the URL. This ID MUST correspond to the private key
+            specified with private_key_file or private_key_string.
+
+        :type expire_time: int
+        :param expire_time: The expiry time of the URL. If provided, the URL
+            will expire after the time has passed. If not provided the URL will
+            never expire. Format is a unix epoch.
+            Use int(time.time() + duration_in_sec).
+
+        :type valid_after_time: int
+        :param valid_after_time: If provided, the URL will not be valid until
+            after valid_after_time. Format is a unix epoch.
+            Use int(time.time() + secs_until_valid).
+
+        :type ip_address: str
+        :param ip_address: If provided, only allows access from the specified
+            IP address. Use '192.168.0.10' for a single IP or
+            use '192.168.0.0/24' CIDR notation for a subnet.
+
+        :type policy_url: str
+        :param policy_url: If provided, allows the signature to contain
+            wildcard globs in the URL. For example, you could
+            provide: 'http://example.com/media/\*' and the policy
+            and signature would allow access to all contents of
+            the media subdirectory. If not specified, only
+            allow access to the exact url provided in 'url'.
+
+        :type private_key_file: str or file object.
+        :param private_key_file: If provided, contains the filename of the
+            private key file used for signing or an open
+            file object containing the private key
+            contents. Only one of private_key_file or
+            private_key_string can be provided.
+
+        :type private_key_string: str
+        :param private_key_string: If provided, contains the private key string
+            used for signing. Only one of private_key_file or
+            private_key_string can be provided.
+
+        :rtype: str
+        :return: The signed URL.
+ """ + # Get the required parameters + params = self._create_signing_params( + url=url, keypair_id=keypair_id, expire_time=expire_time, + valid_after_time=valid_after_time, ip_address=ip_address, + policy_url=policy_url, private_key_file=private_key_file, + private_key_string=private_key_string) + + #combine these into a full url + if "?" in url: + sep = "&" + else: + sep = "?" + signed_url_params = [] + for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]: + if key in params: + param = "%s=%s" % (key, params[key]) + signed_url_params.append(param) + signed_url = url + sep + "&".join(signed_url_params) + return signed_url + + def _create_signing_params(self, url, keypair_id, + expire_time=None, valid_after_time=None, + ip_address=None, policy_url=None, + private_key_file=None, private_key_string=None): + """ + Creates the required URL parameters for a signed URL. + """ + params = {} + # Check if we can use a canned policy + if expire_time and not valid_after_time and not ip_address and not policy_url: + # we manually construct this policy string to ensure formatting + # matches signature + policy = self._canned_policy(url, expire_time) + params["Expires"] = str(expire_time) + else: + # If no policy_url is specified, default to the full url. + if policy_url is None: + policy_url = url + # Can't use canned policy + policy = self._custom_policy(policy_url, expires=expire_time, + valid_after=valid_after_time, + ip_address=ip_address) + + encoded_policy = self._url_base64_encode(policy) + params["Policy"] = encoded_policy + #sign the policy + signature = self._sign_string(policy, private_key_file, private_key_string) + #now base64 encode the signature (URL safe as well) + encoded_signature = self._url_base64_encode(signature) + params["Signature"] = encoded_signature + params["Key-Pair-Id"] = keypair_id + return params + + @staticmethod + def _canned_policy(resource, expires): + """ + Creates a canned policy string. + """ + policy = ('{"Statement":[{"Resource":"%(resource)s",' + '"Condition":{"DateLessThan":{"AWS:EpochTime":' + '%(expires)s}}}]}' % locals()) + return policy + + @staticmethod + def _custom_policy(resource, expires=None, valid_after=None, ip_address=None): + """ + Creates a custom policy string based on the supplied parameters. + """ + condition = {} + # SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy + # The 'DateLessThan' property is required. + if not expires: + # Defaults to ONE day + expires = int(time.time()) + 86400 + condition["DateLessThan"] = {"AWS:EpochTime": expires} + if valid_after: + condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after} + if ip_address: + if '/' not in ip_address: + ip_address += "/32" + condition["IpAddress"] = {"AWS:SourceIp": ip_address} + policy = {"Statement": [{ + "Resource": resource, + "Condition": condition}]} + return json.dumps(policy, separators=(",", ":")) + + @staticmethod + def _sign_string(message, private_key_file=None, private_key_string=None): + """ + Signs a string for use with Amazon CloudFront. + Requires the rsa library be installed. 
+ """ + try: + import rsa + except ImportError: + raise NotImplementedError("Boto depends on the python rsa " + "library to generate signed URLs for " + "CloudFront") + # Make sure only one of private_key_file and private_key_string is set + if private_key_file and private_key_string: + raise ValueError("Only specify the private_key_file or the private_key_string not both") + if not private_key_file and not private_key_string: + raise ValueError("You must specify one of private_key_file or private_key_string") + # If private_key_file is a file name, open it and read it + if private_key_string is None: + if isinstance(private_key_file, six.string_types): + with open(private_key_file, 'r') as file_handle: + private_key_string = file_handle.read() + # Otherwise, treat it like a file + else: + private_key_string = private_key_file.read() + + # Sign it! + private_key = rsa.PrivateKey.load_pkcs1(private_key_string) + signature = rsa.sign(str(message), private_key, 'SHA-1') + return signature + + @staticmethod + def _url_base64_encode(msg): + """ + Base64 encodes a string using the URL-safe characters specified by + Amazon. + """ + msg_base64 = base64.b64encode(msg) + msg_base64 = msg_base64.replace('+', '-') + msg_base64 = msg_base64.replace('=', '_') + msg_base64 = msg_base64.replace('/', '~') + return msg_base64 + +class StreamingDistribution(Distribution): + + def __init__(self, connection=None, config=None, domain_name='', + id='', last_modified_time=None, status=''): + super(StreamingDistribution, self).__init__(connection, config, + domain_name, id, last_modified_time, status) + self._object_class = StreamingObject + + def startElement(self, name, attrs, connection): + if name == 'StreamingDistributionConfig': + self.config = StreamingDistributionConfig() + return self.config + else: + return super(StreamingDistribution, self).startElement(name, attrs, + connection) + + def update(self, enabled=None, cnames=None, comment=None): + """ + Update the configuration of the StreamingDistribution. The only values + of the StreamingDistributionConfig that can be directly updated are: + + * CNAMES + * Comment + * Whether the Distribution is enabled or not + + Any changes to the ``trusted_signers`` or ``origin`` properties of + this distribution's current config object will also be included in + the update. Therefore, to set the origin access identity for this + distribution, set + ``StreamingDistribution.config.origin.origin_access_identity`` + before calling this update method. + + :type enabled: bool + :param enabled: Whether the StreamingDistribution is active or not. + + :type cnames: list of str + :param cnames: The DNS CNAME's associated with this + Distribution. Maximum of 10 values. + + :type comment: str or unicode + :param comment: The comment associated with the Distribution. 
+ + """ + new_config = StreamingDistributionConfig(self.connection, + self.config.origin, + self.config.enabled, + self.config.caller_reference, + self.config.cnames, + self.config.comment, + self.config.trusted_signers) + if enabled is not None: + new_config.enabled = enabled + if cnames is not None: + new_config.cnames = cnames + if comment is not None: + new_config.comment = comment + self.etag = self.connection.set_streaming_distribution_config(self.id, + self.etag, + new_config) + self.config = new_config + self._object_class = StreamingObject + + def delete(self): + self.connection.delete_streaming_distribution(self.id, self.etag) + + diff --git a/ext/boto/cloudfront/exception.py b/ext/boto/cloudfront/exception.py new file mode 100644 index 0000000000..768064210c --- /dev/null +++ b/ext/boto/cloudfront/exception.py @@ -0,0 +1,26 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import BotoServerError + +class CloudFrontServerError(BotoServerError): + + pass diff --git a/ext/boto/cloudfront/identity.py b/ext/boto/cloudfront/identity.py new file mode 100644 index 0000000000..de79c8ac76 --- /dev/null +++ b/ext/boto/cloudfront/identity.py @@ -0,0 +1,121 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+import uuid
+
+class OriginAccessIdentity(object):
+    def __init__(self, connection=None, config=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.config = config
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'CloudFrontOriginAccessIdentityConfig':
+            self.config = OriginAccessIdentityConfig()
+            return self.config
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            setattr(self, name, value)
+
+    def update(self, comment=None):
+        new_config = OriginAccessIdentityConfig(self.connection,
+                                                self.config.caller_reference,
+                                                self.config.comment)
+        if comment is not None:
+            new_config.comment = comment
+        self.etag = self.connection.set_origin_access_identity_config(self.id, self.etag, new_config)
+        self.config = new_config
+
+    def delete(self):
+        return self.connection.delete_origin_access_identity(self.id, self.etag)
+
+    def uri(self):
+        return 'origin-access-identity/cloudfront/%s' % self.id
+
+
+class OriginAccessIdentityConfig(object):
+    def __init__(self, connection=None, caller_reference='', comment=''):
+        self.connection = connection
+        if caller_reference:
+            self.caller_reference = caller_reference
+        else:
+            self.caller_reference = str(uuid.uuid4())
+        self.comment = comment
+
+    def to_xml(self):
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n'
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '</CloudFrontOriginAccessIdentityConfig>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Comment':
+            self.comment = value
+        elif name == 'CallerReference':
+            self.caller_reference = value
+        else:
+            setattr(self, name, value)
+
+
+class OriginAccessIdentitySummary(object):
+    def __init__(self, connection=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            setattr(self, name, value)
+
+    def get_origin_access_identity(self):
+        return self.connection.get_origin_access_identity_info(self.id)
+
diff --git a/ext/boto/cloudfront/invalidation.py b/ext/boto/cloudfront/invalidation.py
new file mode 100644
index 0000000000..385d099e73
--- /dev/null
+++ b/ext/boto/cloudfront/invalidation.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2006-2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+from boto.compat import urllib
+from boto.resultset import ResultSet
+
+
+class InvalidationBatch(object):
+    """A simple invalidation request.
+        :see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html
+    """
+
+    def __init__(self, paths=None, connection=None, distribution=None, caller_reference=''):
+        """Create a new invalidation request:
+            :paths: An array of paths to invalidate
+        """
+        self.paths = paths or []
+        self.distribution = distribution
+        self.caller_reference = caller_reference
+        if not self.caller_reference:
+            self.caller_reference = str(uuid.uuid4())
+
+        # If we passed in a distribution,
+        # then we use that as the connection object
+        if distribution:
+            self.connection = distribution
+        else:
+            self.connection = connection
+
+    def __repr__(self):
+        return '<InvalidationBatch: %s>' % self.id
+
+    def add(self, path):
+        """Add another path to this invalidation request"""
+        return self.paths.append(path)
+
+    def remove(self, path):
+        """Remove a path from this invalidation request"""
+        return self.paths.remove(path)
+
+    def __iter__(self):
+        return iter(self.paths)
+
+    def __getitem__(self, i):
+        return self.paths[i]
+
+    def __setitem__(self, k, v):
+        self.paths[k] = v
+
+    def escape(self, p):
+        """Escape a path, make sure it begins with a slash and contains no invalid characters. Retain literal wildcard characters."""
+        if not p[0] == "/":
+            p = "/%s" % p
+        return urllib.parse.quote(p, safe = "/*")
+
+    def to_xml(self):
+        """Get this batch as XML"""
+        assert self.connection is not None
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
+        for p in self.paths:
+            s += '    <Path>%s</Path>\n' % self.escape(p)
+        s += '    <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        s += '</InvalidationBatch>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        if name == "InvalidationBatch":
+            self.paths = []
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Path':
+            self.paths.append(value)
+        elif name == "Status":
+            self.status = value
+        elif name == "Id":
+            self.id = value
+        elif name == "CreateTime":
+            self.create_time = value
+        elif name == "CallerReference":
+            self.caller_reference = value
+        return None
+
+
+class InvalidationListResultSet(object):
+    """
+    A resultset for listing invalidations on a given CloudFront distribution.
+    Implements the iterator interface and transparently handles paging results
+    from CF so even if you have many thousands of invalidations on the
+    distribution you can iterate over all invalidations in a reasonably
+    efficient manner.
+ """ + def __init__(self, markers=None, connection=None, distribution_id=None, + invalidations=None, marker='', next_marker=None, + max_items=None, is_truncated=False): + self.markers = markers or [] + self.connection = connection + self.distribution_id = distribution_id + self.marker = marker + self.next_marker = next_marker + self.max_items = max_items + self.auto_paginate = max_items is None + self.is_truncated = is_truncated + self._inval_cache = invalidations or [] + + def __iter__(self): + """ + A generator function for listing invalidation requests for a given + CloudFront distribution. + """ + conn = self.connection + distribution_id = self.distribution_id + result_set = self + for inval in result_set._inval_cache: + yield inval + if not self.auto_paginate: + return + while result_set.is_truncated: + result_set = conn.get_invalidation_requests(distribution_id, + marker=result_set.next_marker, + max_items=result_set.max_items) + for i in result_set._inval_cache: + yield i + + def startElement(self, name, attrs, connection): + for root_elem, handler in self.markers: + if name == root_elem: + obj = handler(connection, distribution_id=self.distribution_id) + self._inval_cache.append(obj) + return obj + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'NextMarker': + self.next_marker = value + elif name == 'MaxItems': + self.max_items = int(value) + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + +class InvalidationSummary(object): + """ + Represents InvalidationSummary complex type in CloudFront API that lists + the id and status of a given invalidation request. + """ + def __init__(self, connection=None, distribution_id=None, id='', + status=''): + self.connection = connection + self.distribution_id = distribution_id + self.id = id + self.status = status + + def __repr__(self): + return '' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Status': + self.status = value + + def get_distribution(self): + """ + Returns a Distribution object representing the parent CloudFront + distribution of the invalidation request listed in the + InvalidationSummary. + + :rtype: :class:`boto.cloudfront.distribution.Distribution` + :returns: A Distribution object representing the parent CloudFront + distribution of the invalidation request listed in the + InvalidationSummary + """ + return self.connection.get_distribution_info(self.distribution_id) + + def get_invalidation_request(self): + """ + Returns an InvalidationBatch object representing the invalidation + request referred to in the InvalidationSummary. 
+ + :rtype: :class:`boto.cloudfront.invalidation.InvalidationBatch` + :returns: An InvalidationBatch object representing the invalidation + request referred to by the InvalidationSummary + """ + return self.connection.invalidation_request_status( + self.distribution_id, self.id) diff --git a/ext/boto/cloudfront/logging.py b/ext/boto/cloudfront/logging.py new file mode 100644 index 0000000000..6c2f4fde2f --- /dev/null +++ b/ext/boto/cloudfront/logging.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LoggingInfo(object): + + def __init__(self, bucket='', prefix=''): + self.bucket = bucket + self.prefix = prefix + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket = value + elif name == 'Prefix': + self.prefix = value + else: + setattr(self, name, value) + diff --git a/ext/boto/cloudfront/object.py b/ext/boto/cloudfront/object.py new file mode 100644 index 0000000000..24fc85064c --- /dev/null +++ b/ext/boto/cloudfront/object.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
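+# Example usage (a sketch; assumes ``dist`` is a Distribution over an S3
+# origin that already contains at least one key):
+#
+#     obj = dist.get_objects()[0]
+#     obj.url()          # http://<distribution domain>/<key name>
+#     obj.url('https')   # the same object over HTTPS
+#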
+
+from boto.s3.key import Key
+
+class Object(Key):
+
+    def __init__(self, bucket, name=None):
+        super(Object, self).__init__(bucket, name=name)
+        self.distribution = bucket.distribution
+
+    def __repr__(self):
+        return '<Object: %s/%s>' % (self.distribution.config.origin, self.name)
+
+    def url(self, scheme='http'):
+        url = '%s://' % scheme
+        url += self.distribution.domain_name
+        if scheme.lower().startswith('rtmp'):
+            url += '/cfx/st/'
+        else:
+            url += '/'
+        url += self.name
+        return url
+
+class StreamingObject(Object):
+
+    def url(self, scheme='rtmp'):
+        return super(StreamingObject, self).url(scheme)
+
+
diff --git a/ext/boto/cloudfront/origin.py b/ext/boto/cloudfront/origin.py
new file mode 100644
index 0000000000..b88ec7e7f8
--- /dev/null
+++ b/ext/boto/cloudfront/origin.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.cloudfront.identity import OriginAccessIdentity
+
+def get_oai_value(origin_access_identity):
+    if isinstance(origin_access_identity, OriginAccessIdentity):
+        return origin_access_identity.uri()
+    else:
+        return origin_access_identity
+
+class S3Origin(object):
+    """
+    Origin information to associate with the distribution.
+    If your distribution will use an Amazon S3 origin,
+    then you use the S3Origin element.
+    """
+
+    def __init__(self, dns_name=None, origin_access_identity=None):
+        """
+        :param dns_name: The DNS name of your Amazon S3 bucket to
+                         associate with the distribution.
+                         For example: mybucket.s3.amazonaws.com.
+        :type dns_name: str
+
+        :param origin_access_identity: The CloudFront origin access
+                                       identity to associate with the
+                                       distribution. If you want the
+                                       distribution to serve private content,
+                                       include this element; if you want the
+                                       distribution to serve public content,
+                                       remove this element.
+        :type origin_access_identity: str
+
+        """
+        self.dns_name = dns_name
+        self.origin_access_identity = origin_access_identity
+
+    def __repr__(self):
+        return '<S3Origin: %s>' % self.dns_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DNSName':
+            self.dns_name = value
+        elif name == 'OriginAccessIdentity':
+            self.origin_access_identity = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '  <S3Origin>\n'
+        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
+        if self.origin_access_identity:
+            val = get_oai_value(self.origin_access_identity)
+            s += '    <OriginAccessIdentity>%s</OriginAccessIdentity>\n' % val
+        s += '  </S3Origin>\n'
+        return s
+
+class CustomOrigin(object):
+    """
+    Origin information to associate with the distribution.
+    If your distribution will use a non-Amazon S3 origin,
+    then you use the CustomOrigin element.
+    """
+
+    def __init__(self, dns_name=None, http_port=80, https_port=443,
+                 origin_protocol_policy=None):
+        """
+        :param dns_name: The DNS name of your Amazon S3 bucket to
+                         associate with the distribution.
+                         For example: mybucket.s3.amazonaws.com.
+        :type dns_name: str
+
+        :param http_port: The HTTP port the custom origin listens on.
+        :type http_port: int
+
+        :param https_port: The HTTPS port the custom origin listens on.
+        :type https_port: int
+
+        :param origin_protocol_policy: The origin protocol policy to
+                                       apply to your origin. If you
+                                       specify http-only, CloudFront
+                                       will use HTTP only to access the origin.
+                                       If you specify match-viewer, CloudFront
+                                       will fetch from your origin using HTTP
+                                       or HTTPS, based on the protocol of the
+                                       viewer request.
+        :type origin_protocol_policy: str
+
+        """
+        self.dns_name = dns_name
+        self.http_port = http_port
+        self.https_port = https_port
+        self.origin_protocol_policy = origin_protocol_policy
+
+    def __repr__(self):
+        return '<CustomOrigin: %s>' % self.dns_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DNSName':
+            self.dns_name = value
+        elif name == 'HTTPPort':
+            try:
+                self.http_port = int(value)
+            except ValueError:
+                self.http_port = value
+        elif name == 'HTTPSPort':
+            try:
+                self.https_port = int(value)
+            except ValueError:
+                self.https_port = value
+        elif name == 'OriginProtocolPolicy':
+            self.origin_protocol_policy = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '  <CustomOrigin>\n'
+        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
+        s += '    <HTTPPort>%d</HTTPPort>\n' % self.http_port
+        s += '    <HTTPSPort>%d</HTTPSPort>\n' % self.https_port
+        s += '    <OriginProtocolPolicy>%s</OriginProtocolPolicy>\n' % self.origin_protocol_policy
+        s += '  </CustomOrigin>\n'
+        return s
+
diff --git a/ext/boto/cloudfront/signers.py b/ext/boto/cloudfront/signers.py
new file mode 100644
index 0000000000..dcc9fc9ea3
--- /dev/null
+++ b/ext/boto/cloudfront/signers.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Signer(object): + def __init__(self): + self.id = None + self.key_pair_ids = [] + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.id = 'Self' + elif name == 'AwsAccountNumber': + self.id = value + elif name == 'KeyPairId': + self.key_pair_ids.append(value) + + +class ActiveTrustedSigners(list): + def startElement(self, name, attrs, connection): + if name == 'Signer': + s = Signer() + self.append(s) + return s + + def endElement(self, name, value, connection): + pass + + +class TrustedSigners(list): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.append(name) + elif name == 'AwsAccountNumber': + self.append(value) + diff --git a/ext/boto/cloudhsm/__init__.py b/ext/boto/cloudhsm/__init__.py new file mode 100644 index 0000000000..5b5c4f30f8 --- /dev/null +++ b/ext/boto/cloudhsm/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS CloudHSM service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudhsm.layer1 import CloudHSMConnection + return get_regions('cloudhsm', connection_cls=CloudHSMConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cloudhsm.layer1 import CloudHSMConnection + return connect('cloudhsm', region_name, connection_cls=CloudHSMConnection, + **kw_params) diff --git a/ext/boto/cloudhsm/exceptions.py b/ext/boto/cloudhsm/exceptions.py new file mode 100644 index 0000000000..1e14abe175 --- /dev/null +++ b/ext/boto/cloudhsm/exceptions.py @@ -0,0 +1,35 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class InvalidRequestException(BotoServerError): + pass + + +class CloudHsmServiceException(BotoServerError): + pass + + +class CloudHsmInternalException(BotoServerError): + pass diff --git a/ext/boto/cloudhsm/layer1.py b/ext/boto/cloudhsm/layer1.py new file mode 100644 index 0000000000..e0877736e2 --- /dev/null +++ b/ext/boto/cloudhsm/layer1.py @@ -0,0 +1,448 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
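+#
+# Example usage (a sketch; the region name is illustrative and the
+# 'HsmList' response key follows the CloudHSM classic API):
+#
+#     from boto.cloudhsm import connect_to_region
+#
+#     conn = connect_to_region('us-east-1')
+#     for arn in conn.list_hsms().get('HsmList', []):
+#         print(conn.describe_hsm(hsm_arn=arn))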
+# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudhsm import exceptions + + +class CloudHSMConnection(AWSQueryConnection): + """ + AWS CloudHSM Service + """ + APIVersion = "2014-05-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com" + ServiceName = "CloudHSM" + TargetPrefix = "CloudHsmFrontendService" + ResponseError = JSONResponseError + + _faults = { + "InvalidRequestException": exceptions.InvalidRequestException, + "CloudHsmServiceException": exceptions.CloudHsmServiceException, + "CloudHsmInternalException": exceptions.CloudHsmInternalException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudHSMConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_hapg(self, label): + """ + Creates a high-availability partition group. A high- + availability partition group is a group of partitions that + spans multiple physical HSMs. + + :type label: string + :param label: The label of the new high-availability partition group. + + """ + params = {'Label': label, } + return self.make_request(action='CreateHapg', + body=json.dumps(params)) + + def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type, + eni_ip=None, external_id=None, client_token=None, + syslog_ip=None): + """ + Creates an uninitialized HSM instance. Running this command + provisions an HSM appliance and will result in charges to your + AWS account for the HSM. + + :type subnet_id: string + :param subnet_id: The identifier of the subnet in your VPC in which to + place the HSM. + + :type ssh_key: string + :param ssh_key: The SSH public key to install on the HSM. + + :type eni_ip: string + :param eni_ip: The IP address to assign to the HSM's ENI. + + :type iam_role_arn: string + :param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM + service to allocate an ENI on your behalf. + + :type external_id: string + :param external_id: The external ID from **IamRoleArn**, if present. + + :type subscription_type: string + :param subscription_type: The subscription type. + + :type client_token: string + :param client_token: A user-defined token to ensure idempotence. + Subsequent calls to this action with the same token will be + ignored. + + :type syslog_ip: string + :param syslog_ip: The IP address for the syslog monitoring server. + + """ + params = { + 'SubnetId': subnet_id, + 'SshKey': ssh_key, + 'IamRoleArn': iam_role_arn, + 'SubscriptionType': subscription_type, + } + if eni_ip is not None: + params['EniIp'] = eni_ip + if external_id is not None: + params['ExternalId'] = external_id + if client_token is not None: + params['ClientToken'] = client_token + if syslog_ip is not None: + params['SyslogIp'] = syslog_ip + return self.make_request(action='CreateHsm', + body=json.dumps(params)) + + def create_luna_client(self, certificate, label=None): + """ + Creates an HSM client. + + :type label: string + :param label: The label for the client. + + :type certificate: string + :param certificate: The contents of a Base64-Encoded X.509 v3 + certificate to be installed on the HSMs used by this client. 
+ + """ + params = {'Certificate': certificate, } + if label is not None: + params['Label'] = label + return self.make_request(action='CreateLunaClient', + body=json.dumps(params)) + + def delete_hapg(self, hapg_arn): + """ + Deletes a high-availability partition group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + delete. + + """ + params = {'HapgArn': hapg_arn, } + return self.make_request(action='DeleteHapg', + body=json.dumps(params)) + + def delete_hsm(self, hsm_arn): + """ + Deletes an HSM. Once complete, this operation cannot be undone + and your key material cannot be recovered. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM to delete. + + """ + params = {'HsmArn': hsm_arn, } + return self.make_request(action='DeleteHsm', + body=json.dumps(params)) + + def delete_luna_client(self, client_arn): + """ + Deletes a client. + + :type client_arn: string + :param client_arn: The ARN of the client to delete. + + """ + params = {'ClientArn': client_arn, } + return self.make_request(action='DeleteLunaClient', + body=json.dumps(params)) + + def describe_hapg(self, hapg_arn): + """ + Retrieves information about a high-availability partition + group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + describe. + + """ + params = {'HapgArn': hapg_arn, } + return self.make_request(action='DescribeHapg', + body=json.dumps(params)) + + def describe_hsm(self, hsm_arn=None, hsm_serial_number=None): + """ + Retrieves information about an HSM. You can identify the HSM + by its ARN or its serial number. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM. Either the HsmArn or the + SerialNumber parameter must be specified. + + :type hsm_serial_number: string + :param hsm_serial_number: The serial number of the HSM. Either the + HsmArn or the HsmSerialNumber parameter must be specified. + + """ + params = {} + if hsm_arn is not None: + params['HsmArn'] = hsm_arn + if hsm_serial_number is not None: + params['HsmSerialNumber'] = hsm_serial_number + return self.make_request(action='DescribeHsm', + body=json.dumps(params)) + + def describe_luna_client(self, client_arn=None, + certificate_fingerprint=None): + """ + Retrieves information about an HSM client. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type certificate_fingerprint: string + :param certificate_fingerprint: The certificate fingerprint. + + """ + params = {} + if client_arn is not None: + params['ClientArn'] = client_arn + if certificate_fingerprint is not None: + params['CertificateFingerprint'] = certificate_fingerprint + return self.make_request(action='DescribeLunaClient', + body=json.dumps(params)) + + def get_config(self, client_arn, client_version, hapg_list): + """ + Gets the configuration files necessary to connect to all high + availability partition groups the client is associated with. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type client_version: string + :param client_version: The client version. + + :type hapg_list: list + :param hapg_list: A list of ARNs that identify the high-availability + partition groups that are associated with the client. 
+ + """ + params = { + 'ClientArn': client_arn, + 'ClientVersion': client_version, + 'HapgList': hapg_list, + } + return self.make_request(action='GetConfig', + body=json.dumps(params)) + + def list_available_zones(self): + """ + Lists the Availability Zones that have available AWS CloudHSM + capacity. + + + """ + params = {} + return self.make_request(action='ListAvailableZones', + body=json.dumps(params)) + + def list_hapgs(self, next_token=None): + """ + Lists the high-availability partition groups for the account. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListHapgs to retrieve the next set of items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListHapgs. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListHapgs', + body=json.dumps(params)) + + def list_hsms(self, next_token=None): + """ + Retrieves the identifiers of all of the HSMs provisioned for + the current customer. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListHsms to retrieve the next set of items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListHsms. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListHsms', + body=json.dumps(params)) + + def list_luna_clients(self, next_token=None): + """ + Lists all of the clients. + + This operation supports pagination with the use of the + NextToken member. If more results are available, the NextToken + member of the response contains a token that you pass in the + next call to ListLunaClients to retrieve the next set of + items. + + :type next_token: string + :param next_token: The NextToken value from a previous call to + ListLunaClients. Pass null if this is the first call. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListLunaClients', + body=json.dumps(params)) + + def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None): + """ + Modifies an existing high-availability partition group. + + :type hapg_arn: string + :param hapg_arn: The ARN of the high-availability partition group to + modify. + + :type label: string + :param label: The new label for the high-availability partition group. + + :type partition_serial_list: list + :param partition_serial_list: The list of partition serial numbers to + make members of the high-availability partition group. + + """ + params = {'HapgArn': hapg_arn, } + if label is not None: + params['Label'] = label + if partition_serial_list is not None: + params['PartitionSerialList'] = partition_serial_list + return self.make_request(action='ModifyHapg', + body=json.dumps(params)) + + def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None, + iam_role_arn=None, external_id=None, syslog_ip=None): + """ + Modifies an HSM. + + :type hsm_arn: string + :param hsm_arn: The ARN of the HSM to modify. + + :type subnet_id: string + :param subnet_id: The new identifier of the subnet that the HSM is in. 
+ + :type eni_ip: string + :param eni_ip: The new IP address for the elastic network interface + attached to the HSM. + + :type iam_role_arn: string + :param iam_role_arn: The new IAM role ARN. + + :type external_id: string + :param external_id: The new external ID. + + :type syslog_ip: string + :param syslog_ip: The new IP address for the syslog monitoring server. + + """ + params = {'HsmArn': hsm_arn, } + if subnet_id is not None: + params['SubnetId'] = subnet_id + if eni_ip is not None: + params['EniIp'] = eni_ip + if iam_role_arn is not None: + params['IamRoleArn'] = iam_role_arn + if external_id is not None: + params['ExternalId'] = external_id + if syslog_ip is not None: + params['SyslogIp'] = syslog_ip + return self.make_request(action='ModifyHsm', + body=json.dumps(params)) + + def modify_luna_client(self, client_arn, certificate): + """ + Modifies the certificate used by the client. + + This action can potentially start a workflow to install the + new certificate on the client's HSMs. + + :type client_arn: string + :param client_arn: The ARN of the client. + + :type certificate: string + :param certificate: The new certificate for the client. + + """ + params = { + 'ClientArn': client_arn, + 'Certificate': certificate, + } + return self.make_request(action='ModifyLunaClient', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/cloudsearch/__init__.py b/ext/boto/cloudsearch/__init__.py new file mode 100644 index 0000000000..42e2147ef0 --- /dev/null +++ b/ext/boto/cloudsearch/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo, get_regions
+from boto.regioninfo import connect
+
+
+def regions():
+    """
+    Get all available regions for the Amazon CloudSearch service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.cloudsearch.layer1 import Layer1
+    return get_regions('cloudsearch', connection_cls=Layer1)
+
+
+def connect_to_region(region_name, **kw_params):
+    from boto.cloudsearch.layer1 import Layer1
+    return connect('cloudsearch', region_name, connection_cls=Layer1,
+                   **kw_params)
diff --git a/ext/boto/cloudsearch/document.py b/ext/boto/cloudsearch/document.py
new file mode 100644
index 0000000000..0a1d9db22c
--- /dev/null
+++ b/ext/boto/cloudsearch/document.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto.exception
+from boto.compat import json
+import requests
+import boto
+
+class SearchServiceException(Exception):
+    pass
+
+
+class CommitMismatchError(Exception):
+    pass
+
+class EncodingError(Exception):
+    """
+    Content sent for Cloud Search indexing was incorrectly encoded.
+
+    This usually happens when a document is marked as unicode but non-unicode
+    characters are present.
+    """
+    pass
+
+class ContentTooLongError(Exception):
+    """
+    Content sent for Cloud Search indexing was too long.
+
+    This will usually happen when documents queued for indexing add up to more
+    than the limit allowed per upload batch (5MB).
+
+    """
+    pass
+
+class DocumentServiceConnection(object):
+    """
+    A CloudSearch document service.
+
+    The DocumentServiceConnection is used to add, remove and update documents in
+    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Data Format).
+
+    To generate an appropriate SDF, use :func:`add` to add or update documents,
+    as well as :func:`delete` to remove documents.
+
+    Once the set of documents is ready to be indexed, use :func:`commit` to send
+    the commands to CloudSearch.
+
+    If there are a lot of documents to index, it may be preferable to split the
+    generation of SDF data and the actual uploading into CloudSearch. Retrieve
+    the current SDF with :func:`get_sdf`. If this file is then uploaded to S3,
+    it can be retrieved back afterwards for upload into CloudSearch using
+    :func:`add_sdf_from_s3`.
+
+    The SDF is not cleared after a :func:`commit`. If you wish to continue
+    using the DocumentServiceConnection for another batch upload of commands,
+    you will need to :func:`clear_sdf` first to stop the previous batch of
+    commands from being uploaded again.
+
+    """
+
+    def __init__(self, domain=None, endpoint=None):
+        self.domain = domain
+        self.endpoint = endpoint
+        if not self.endpoint:
+            self.endpoint = domain.doc_service_endpoint
+        self.documents_batch = []
+        self._sdf = None
+
+    def add(self, _id, version, fields, lang='en'):
+        """
+        Add a document to be processed by the DocumentService
+
+        The document will not actually be added until :func:`commit` is called
+
+        :type _id: string
+        :param _id: A unique ID used to refer to this document.
+
+        :type version: int
+        :param version: Version of the document being indexed. If a file is
+            being reindexed, the version should be higher than the existing one
+            in CloudSearch.
+
+        :type fields: dict
+        :param fields: A dictionary of key-value pairs to be uploaded.
+
+        :type lang: string
+        :param lang: The language code the data is in. Only 'en' is currently
+            supported.
+        """
+
+        d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
+             'fields': fields}
+        self.documents_batch.append(d)
+
+    def delete(self, _id, version):
+        """
+        Schedule a document to be removed from the CloudSearch service
+
+        The document will not actually be scheduled for removal until :func:`commit` is called
+
+        :type _id: string
+        :param _id: The unique ID of this document.
+
+        :type version: int
+        :param version: Version of the document to remove. The delete will only
+            occur if this version number is higher than the version currently
+            in the index.
+        """
+
+        d = {'type': 'delete', 'id': _id, 'version': version}
+        self.documents_batch.append(d)
+
+    def get_sdf(self):
+        """
+        Generate the working set of documents in Search Data Format (SDF)
+
+        :rtype: string
+        :returns: JSON-formatted string of the documents in SDF
+        """
+
+        return self._sdf if self._sdf else json.dumps(self.documents_batch)
+
+    def clear_sdf(self):
+        """
+        Clear the working documents from this DocumentServiceConnection
+
+        This should be used after :func:`commit` if the connection will be reused
+        for another set of documents.
+        """
+
+        self._sdf = None
+        self.documents_batch = []
+
+    def add_sdf_from_s3(self, key_obj):
+        """
+        Load an SDF from S3
+
+        Using this method will result in documents added through
+        :func:`add` and :func:`delete` being ignored.
+
+        :type key_obj: :class:`boto.s3.key.Key`
+        :param key_obj: An S3 key which contains an SDF
+        """
+        #@todo:: (lucas) would be nice if this could just take an s3://uri...
+
+        self._sdf = key_obj.get_contents_as_string()
+
+    def commit(self):
+        """
+        Actually send an SDF to CloudSearch for processing
+
+        If an SDF file has been explicitly loaded it will be used. Otherwise,
+        documents added through :func:`add` and :func:`delete` will be used.
+
+        :rtype: :class:`CommitResponse`
+        :returns: A summary of documents added and deleted
+        """
+
+        sdf = self.get_sdf()
+
+        if ': null' in sdf:
+            boto.log.error('null value in sdf detected. This will probably '
+                           'raise a 500 error.')
+            index = sdf.index(': null')
+            boto.log.error(sdf[index - 100:index + 100])
+
+        url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
+
+        # Keep-alive is automatic in a post-1.0 requests world.
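+        # The mounted HTTPAdapter below gives the upload a connection
+        # pool (20 pools, up to 50 sockets each) and up to 5 automatic
+        # retries, so large batches survive transient connection errors.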
+ session = requests.Session() + adapter = requests.adapters.HTTPAdapter( + pool_connections=20, + pool_maxsize=50, + max_retries=5 + ) + session.mount('http://', adapter) + session.mount('https://', adapter) + r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'}) + + return CommitResponse(r, self, sdf) + + +class CommitResponse(object): + """Wrapper for response to Cloudsearch document batch commit. + + :type response: :class:`requests.models.Response` + :param response: Response from Cloudsearch /documents/batch API + + :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection` + :param doc_service: Object containing the documents posted and methods to + retry + + :raises: :class:`boto.exception.BotoServerError` + :raises: :class:`boto.cloudsearch.document.SearchServiceException` + :raises: :class:`boto.cloudsearch.document.EncodingError` + :raises: :class:`boto.cloudsearch.document.ContentTooLongError` + """ + def __init__(self, response, doc_service, sdf): + self.response = response + self.doc_service = doc_service + self.sdf = sdf + + _body = response.content.decode('utf-8') + + try: + self.content = json.loads(_body) + except: + boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n' + 'SDF:\n{1}'.format(_body, self.sdf)) + raise boto.exception.BotoServerError(self.response.status_code, '', + body=_body) + + self.status = self.content['status'] + if self.status == 'error': + self.errors = [e.get('message') for e in self.content.get('errors', + [])] + for e in self.errors: + if "Illegal Unicode character" in e: + raise EncodingError("Illegal Unicode character in document") + elif e == "The Content-Length is too long": + raise ContentTooLongError("Content was too long") + if 'adds' not in self.content or 'deletes' not in self.content: + raise SearchServiceException("Error indexing documents" + " => %s" % self.content.get('message', '')) + else: + self.errors = [] + + self.adds = self.content['adds'] + self.deletes = self.content['deletes'] + self._check_num_ops('add', self.adds) + self._check_num_ops('delete', self.deletes) + + def _check_num_ops(self, type_, response_num): + """Raise exception if number of ops in response doesn't match commit + + :type type_: str + :param type_: Type of commit operation: 'add' or 'delete' + + :type response_num: int + :param response_num: Number of adds or deletes in the response. + + :raises: :class:`boto.cloudsearch.document.CommitMismatchError` + """ + commit_num = len([d for d in self.doc_service.documents_batch + if d['type'] == type_]) + + if response_num != commit_num: + raise CommitMismatchError( + 'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\ + .format(type_, commit_num, response_num)) diff --git a/ext/boto/cloudsearch/domain.py b/ext/boto/cloudsearch/domain.py new file mode 100644 index 0000000000..9800b17512 --- /dev/null +++ b/ext/boto/cloudsearch/domain.py @@ -0,0 +1,394 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.cloudsearch.optionstatus import OptionStatus
+from boto.cloudsearch.optionstatus import IndexFieldStatus
+from boto.cloudsearch.optionstatus import ServicePoliciesStatus
+from boto.cloudsearch.optionstatus import RankExpressionStatus
+from boto.cloudsearch.document import DocumentServiceConnection
+from boto.cloudsearch.search import SearchConnection
+
+def handle_bool(value):
+    if value in [True, 'true', 'True', 'TRUE', 1]:
+        return True
+    return False
+
+
+class Domain(object):
+    """
+    A CloudSearch domain.
+
+    :ivar name: The name of the domain.
+
+    :ivar id: The internally generated unique identifier for the domain.
+
+    :ivar created: A boolean which is True if the domain is
+        created. It can take several minutes to initialize a domain
+        when CreateDomain is called. Newly created search domains are
+        returned with a False value for Created until domain creation
+        is complete.
+
+    :ivar deleted: A boolean which is True if the search domain has
+        been deleted. The system must clean up resources dedicated to
+        the search domain when delete is called. Newly deleted
+        search domains are returned from list_domains with a True
+        value for deleted for several minutes until resource cleanup
+        is complete.
+
+    :ivar processing: True if processing is being done to activate the
+        current domain configuration.
+
+    :ivar num_searchable_docs: The number of documents that have been
+        submitted to the domain and indexed.
+
+    :ivar requires_index_documents: True if index_documents needs to be
+        called to activate the current domain configuration.
+
+    :ivar search_instance_count: The number of search instances that are
+        available to process search requests.
+
+    :ivar search_instance_type: The instance type that is being used to
+        process search requests.
+
+    :ivar search_partition_count: The number of partitions across which
+        the search index is spread.
+ """ + + def __init__(self, layer1, data): + self.layer1 = layer1 + self.update_from_data(data) + + def update_from_data(self, data): + self.created = data['created'] + self.deleted = data['deleted'] + self.processing = data['processing'] + self.requires_index_documents = data['requires_index_documents'] + self.domain_id = data['domain_id'] + self.domain_name = data['domain_name'] + self.num_searchable_docs = data['num_searchable_docs'] + self.search_instance_count = data['search_instance_count'] + self.search_instance_type = data.get('search_instance_type', None) + self.search_partition_count = data['search_partition_count'] + self._doc_service = data['doc_service'] + self._search_service = data['search_service'] + + @property + def doc_service_arn(self): + return self._doc_service['arn'] + + @property + def doc_service_endpoint(self): + return self._doc_service['endpoint'] + + @property + def search_service_arn(self): + return self._search_service['arn'] + + @property + def search_service_endpoint(self): + return self._search_service['endpoint'] + + @property + def created(self): + return self._created + + @created.setter + def created(self, value): + self._created = handle_bool(value) + + @property + def deleted(self): + return self._deleted + + @deleted.setter + def deleted(self, value): + self._deleted = handle_bool(value) + + @property + def processing(self): + return self._processing + + @processing.setter + def processing(self, value): + self._processing = handle_bool(value) + + @property + def requires_index_documents(self): + return self._requires_index_documents + + @requires_index_documents.setter + def requires_index_documents(self, value): + self._requires_index_documents = handle_bool(value) + + @property + def search_partition_count(self): + return self._search_partition_count + + @search_partition_count.setter + def search_partition_count(self, value): + self._search_partition_count = int(value) + + @property + def search_instance_count(self): + return self._search_instance_count + + @search_instance_count.setter + def search_instance_count(self, value): + self._search_instance_count = int(value) + + @property + def num_searchable_docs(self): + return self._num_searchable_docs + + @num_searchable_docs.setter + def num_searchable_docs(self, value): + self._num_searchable_docs = int(value) + + @property + def name(self): + return self.domain_name + + @property + def id(self): + return self.domain_id + + def delete(self): + """ + Delete this domain and all index data associated with it. + """ + return self.layer1.delete_domain(self.name) + + def get_stemming(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined stemming options for + the domain. + """ + return OptionStatus(self, None, + self.layer1.describe_stemming_options, + self.layer1.update_stemming_options) + + def get_stopwords(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined stopword options for + the domain. + """ + return OptionStatus(self, None, + self.layer1.describe_stopword_options, + self.layer1.update_stopword_options) + + def get_synonyms(self): + """ + Return a :class:`boto.cloudsearch.option.OptionStatus` object + representing the currently defined synonym options for + the domain. 
+        """
+        return OptionStatus(self, None,
+                            self.layer1.describe_synonym_options,
+                            self.layer1.update_synonym_options)
+
+    def get_access_policies(self):
+        """
+        Return a :class:`boto.cloudsearch.option.OptionStatus` object
+        representing the currently defined access policies for
+        the domain.
+        """
+        return ServicePoliciesStatus(self, None,
+                                     self.layer1.describe_service_access_policies,
+                                     self.layer1.update_service_access_policies)
+
+    def index_documents(self):
+        """
+        Tells the search domain to start indexing its documents using
+        the latest text processing options and IndexFields. This
+        operation must be invoked to make options whose OptionStatus
+        has OptionState of RequiresIndexDocuments visible in search
+        results.
+        """
+        self.layer1.index_documents(self.name)
+
+    def get_index_fields(self, field_names=None):
+        """
+        Return a list of index fields defined for this domain.
+        """
+        data = self.layer1.describe_index_fields(self.name, field_names)
+        return [IndexFieldStatus(self, d) for d in data]
+
+    def create_index_field(self, field_name, field_type,
+                           default='', facet=False, result=False, searchable=False,
+                           source_attributes=[]):
+        """
+        Defines an ``IndexField``, either replacing an existing
+        definition or creating a new one.
+
+        :type field_name: string
+        :param field_name: The name of a field in the search index.
+
+        :type field_type: string
+        :param field_type: The type of field. Valid values are
+            uint | literal | text
+
+        :type default: string or int
+        :param default: The default value for the field. If the
+            field is of type ``uint`` this should be an integer value.
+            Otherwise, it's a string.
+
+        :type facet: bool
+        :param facet: A boolean to indicate whether facets
+            are enabled for this field or not. Does not apply to
+            fields of type ``uint``.
+
+        :type result: bool
+        :param result: A boolean to indicate whether values
+            of this field can be returned in search results or
+            used in ranking. Does not apply to fields of type ``uint``.
+
+        :type searchable: bool
+        :param searchable: A boolean to indicate whether search
+            is enabled for this field or not. Applies only to fields
+            of type ``literal``.
+
+        :type source_attributes: list of dicts
+        :param source_attributes: An optional list of dicts that
+            provide information about attributes for this index field.
+            A maximum of 20 source attributes can be configured for
+            each index field.
+
+            Each item in the list is a dict with the following keys:
+
+            * data_copy - The value is a dict with the following keys:
+                * default - Optional default value if the source attribute
+                    is not specified in a document.
+                * name - The name of the document source field to add
+                    to this ``IndexField``.
+            * data_function - Identifies the transformation to apply
+                when copying data from a source attribute.
+            * data_map - The value is a dict with the following keys:
+                * cases - A dict that translates source field values
+                    to custom values.
+                * default - An optional default value to use if the
+                    source attribute is not specified in a document.
+                * name - the name of the document source field to add
+                    to this ``IndexField``
+            * data_trim_title - Trims common title words from a source
+                document attribute when populating an ``IndexField``.
+                This can be used to create an ``IndexField`` you can
+                use for sorting. The value is a dict with the following
+                fields:
+                * default - An optional default value.
+                * language - an IETF RFC 4646 language code.
+                * separator - The separator that follows the text to trim.
+                * name - The name of the document source field to add.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        data = self.layer1.define_index_field(self.name, field_name,
+                                              field_type, default=default,
+                                              facet=facet, result=result,
+                                              searchable=searchable,
+                                              source_attributes=source_attributes)
+        return IndexFieldStatus(self, data,
+                                self.layer1.describe_index_fields)
+
+    def get_rank_expressions(self, rank_names=None):
+        """
+        Return a list of rank expressions defined for this domain.
+        """
+        fn = self.layer1.describe_rank_expressions
+        data = fn(self.name, rank_names)
+        return [RankExpressionStatus(self, d, fn) for d in data]
+
+    def create_rank_expression(self, name, expression):
+        """
+        Create a new rank expression.
+
+        :type name: string
+        :param name: The name of an expression computed for ranking
+            while processing a search request.
+
+        :type expression: string
+        :param expression: The expression to evaluate for ranking
+            or thresholding while processing a search request. The
+            RankExpression syntax is based on JavaScript expressions
+            and supports:
+
+            * Integer, floating point, hex and octal literals
+            * Shortcut evaluation of logical operators such that an
+                expression a || b evaluates to the value a if a is
+                true without evaluating b at all
+            * JavaScript order of precedence for operators
+            * Arithmetic operators: + - * / %
+            * Boolean operators (including the ternary operator)
+            * Bitwise operators
+            * Comparison operators
+            * Common mathematic functions: abs ceil erf exp floor
+                lgamma ln log2 log10 max min sqrt pow
+            * Trigonometric library functions: acosh acos asinh asin
+                atanh atan cosh cos sinh sin tanh tan
+            * Random generation of a number between 0 and 1: rand
+            * Current time in epoch: time
+            * The min max functions that operate on a variable argument list
+
+            Intermediate results are calculated as double precision
+            floating point values. The final return value of a
+            RankExpression is automatically converted from floating
+            point to a 32-bit unsigned integer by rounding to the
+            nearest integer, with a natural floor of 0 and a ceiling
+            of max(uint32_t), 4294967295. Mathematical errors such as
+            dividing by 0 will fail during evaluation and return a
+            value of 0.
+
+            The source data for a RankExpression can be the name of an
+            IndexField of type uint, another RankExpression or the
+            reserved name text_relevance. The text_relevance source is
+            defined to return an integer from 0 to 1000 (inclusive) to
+            indicate how relevant a document is to the search request,
+            taking into account repetition of search terms in the
+            document and proximity of search terms to each other in
+            each matching IndexField in the document.
+
+            For more information about using rank expressions to
+            customize ranking, see the Amazon CloudSearch Developer
+            Guide.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        data = self.layer1.define_rank_expression(self.name, name, expression)
+        return RankExpressionStatus(self, data,
+                                    self.layer1.describe_rank_expressions)
+
+    def get_document_service(self):
+        return DocumentServiceConnection(domain=self)
+
+    def get_search_service(self):
+        return SearchConnection(domain=self)
+
+    def __repr__(self):
+        return '<Domain: %s>' % self.domain_name
+
diff --git a/ext/boto/cloudsearch/layer1.py b/ext/boto/cloudsearch/layer1.py
new file mode 100644
index 0000000000..69132e39ce
--- /dev/null
+++ b/ext/boto/cloudsearch/layer1.py
@@ -0,0 +1,747 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
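+#
+# Layer1 is a thin client for the CloudSearch configuration API
+# (APIVersion 2011-02-01): each method below issues a signed
+# AWSQueryConnection request, and get_response() parses the XML reply
+# into plain Python dicts and lists.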
+ +import boto +import boto.jsonresponse +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo + +#boto.set_stream_logger('cloudsearch') + + +def do_bool(val): + return 'true' if val in [True, 1, '1', 'true'] else 'false' + + +class Layer1(AWSQueryConnection): + + APIVersion = '2011-02-01' + DefaultRegionName = boto.config.get('Boto', 'cs_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'cs_region_endpoint', + 'cloudsearch.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__( + self, + host=self.region.endpoint, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + proxy_user=proxy_user, + proxy_pass=proxy_pass, + debug=debug, + https_connection_factory=https_connection_factory, + path=path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_response(self, doc_path, action, params, path='/', + parent=None, verb='GET', list_marker=None): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + e = boto.jsonresponse.Element( + list_marker=list_marker if list_marker else 'Set', + pythonize_name=True) + h = boto.jsonresponse.XmlHandler(e, parent) + h.parse(body) + inner = e + for p in doc_path: + inner = inner.get(p) + if not inner: + return None if list_marker is None else [] + if isinstance(inner, list): + return inner + else: + return dict(**inner) + else: + raise self.ResponseError(response.status, response.reason, body) + + def create_domain(self, domain_name): + """ + Create a new search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, LimitExceededException + """ + doc_path = ('create_domain_response', + 'create_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'CreateDomain', + params, verb='POST') + + def define_index_field(self, domain_name, field_name, field_type, + default='', facet=False, result=False, + searchable=False, source_attributes=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not
+            allowed.
+
+        :type field_name: string
+        :param field_name: The name of a field in the search index.
+
+        :type field_type: string
+        :param field_type: The type of field. Valid values are
+            uint | literal | text
+
+        :type default: string or int
+        :param default: The default value for the field. If the
+            field is of type ``uint`` this should be an integer value.
+            Otherwise, it's a string.
+
+        :type facet: bool
+        :param facet: A boolean to indicate whether facets
+            are enabled for this field or not. Does not apply to
+            fields of type ``uint``.
+
+        :type result: bool
+        :param result: A boolean to indicate whether values
+            of this field can be returned in search results or
+            used in ranking. Does not apply to fields of type ``uint``.
+
+        :type searchable: bool
+        :param searchable: A boolean to indicate whether search
+            is enabled for this field or not. Applies only to fields
+            of type ``literal``.
+
+        :type source_attributes: list of dicts
+        :param source_attributes: An optional list of dicts that
+            provide information about attributes for this index field.
+            A maximum of 20 source attributes can be configured for
+            each index field.
+
+            Each item in the list is a dict with the following keys:
+
+            * data_copy - The value is a dict with the following keys:
+                * default - Optional default value if the source attribute
+                    is not specified in a document.
+                * name - The name of the document source field to add
+                    to this ``IndexField``.
+            * data_function - Identifies the transformation to apply
+                when copying data from a source attribute.
+            * data_map - The value is a dict with the following keys:
+                * cases - A dict that translates source field values
+                    to custom values.
+                * default - An optional default value to use if the
+                    source attribute is not specified in a document.
+                * name - the name of the document source field to add
+                    to this ``IndexField``
+            * data_trim_title - Trims common title words from a source
+                document attribute when populating an ``IndexField``.
+                This can be used to create an ``IndexField`` you can
+                use for sorting. The value is a dict with the following
+                fields:
+                * default - An optional default value.
+                * language - an IETF RFC 4646 language code.
+                * separator - The separator that follows the text to trim.
+                * name - The name of the document source field to add.
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            InvalidTypeException, ResourceNotFoundException
+        """
+        doc_path = ('define_index_field_response',
+                    'define_index_field_result',
+                    'index_field')
+        params = {'DomainName': domain_name,
+                  'IndexField.IndexFieldName': field_name,
+                  'IndexField.IndexFieldType': field_type}
+        if field_type == 'literal':
+            params['IndexField.LiteralOptions.DefaultValue'] = default
+            params['IndexField.LiteralOptions.FacetEnabled'] = do_bool(facet)
+            params['IndexField.LiteralOptions.ResultEnabled'] = do_bool(result)
+            params['IndexField.LiteralOptions.SearchEnabled'] = do_bool(searchable)
+        elif field_type == 'uint':
+            params['IndexField.UIntOptions.DefaultValue'] = default
+        elif field_type == 'text':
+            params['IndexField.TextOptions.DefaultValue'] = default
+            params['IndexField.TextOptions.FacetEnabled'] = do_bool(facet)
+            params['IndexField.TextOptions.ResultEnabled'] = do_bool(result)
+
+        return self.get_response(doc_path, 'DefineIndexField',
+                                 params, verb='POST')
+
+    def define_rank_expression(self, domain_name, rank_name, rank_expression):
+        """
+        Defines a RankExpression, either replacing an existing
+        definition or creating a new one.
+
+        :type domain_name: string
+        :param domain_name: A string that represents the name of a
+            domain. Domain names must be unique across the domains
+            owned by an account within an AWS region. Domain names
+            must start with a letter or number and can contain the
+            following characters: a-z (lowercase), 0-9, and -
+            (hyphen). Uppercase letters and underscores are not
+            allowed.
+
+        :type rank_name: string
+        :param rank_name: The name of an expression computed for ranking
+            while processing a search request.
+
+        :type rank_expression: string
+        :param rank_expression: The expression to evaluate for ranking
+            or thresholding while processing a search request. The
+            RankExpression syntax is based on JavaScript expressions
+            and supports:
+
+            * Integer, floating point, hex and octal literals
+            * Shortcut evaluation of logical operators such that an
+                expression a || b evaluates to the value a if a is
+                true without evaluating b at all
+            * JavaScript order of precedence for operators
+            * Arithmetic operators: + - * / %
+            * Boolean operators (including the ternary operator)
+            * Bitwise operators
+            * Comparison operators
+            * Common mathematic functions: abs ceil erf exp floor
+                lgamma ln log2 log10 max min sqrt pow
+            * Trigonometric library functions: acosh acos asinh asin
+                atanh atan cosh cos sinh sin tanh tan
+            * Random generation of a number between 0 and 1: rand
+            * Current time in epoch: time
+            * The min max functions that operate on a variable argument list
+
+            Intermediate results are calculated as double precision
+            floating point values. The final return value of a
+            RankExpression is automatically converted from floating
+            point to a 32-bit unsigned integer by rounding to the
+            nearest integer, with a natural floor of 0 and a ceiling
+            of max(uint32_t), 4294967295. Mathematical errors such as
+            dividing by 0 will fail during evaluation and return a
+            value of 0.
+
+            The source data for a RankExpression can be the name of an
+            IndexField of type uint, another RankExpression or the
+            reserved name text_relevance.
The text_relevance source is + defined to return an integer from 0 to 1000 (inclusive) to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using rank expressions to + customize ranking, see the Amazon CloudSearch Developer + Guide. + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + doc_path = ('define_rank_expression_response', + 'define_rank_expression_result', + 'rank_expression') + params = {'DomainName': domain_name, + 'RankExpression.RankExpression': rank_expression, + 'RankExpression.RankName': rank_name} + return self.get_response(doc_path, 'DefineRankExpression', + params, verb='POST') + + def delete_domain(self, domain_name): + """ + Delete a search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException + """ + doc_path = ('delete_domain_response', + 'delete_domain_result', + 'domain_status') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DeleteDomain', + params, verb='POST') + + def delete_index_field(self, domain_name, field_name): + """ + Deletes an existing ``IndexField`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_name: string + :param field_name: A string that represents the name of + an index field. Field names must begin with a letter and + can contain the following characters: a-z (lowercase), + 0-9, and _ (underscore). Uppercase letters and hyphens are + not allowed. The names "body", "docid", and + "text_relevance" are reserved and cannot be specified as + field or rank expression names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_index_field_response', + 'delete_index_field_result', + 'index_field') + params = {'DomainName': domain_name, + 'IndexFieldName': field_name} + return self.get_response(doc_path, 'DeleteIndexField', + params, verb='POST') + + def delete_rank_expression(self, domain_name, rank_name): + """ + Deletes an existing ``RankExpression`` from the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type rank_name: string + :param rank_name: Name of the ``RankExpression`` to delete. 
+ + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('delete_rank_expression_response', + 'delete_rank_expression_result', + 'rank_expression') + params = {'DomainName': domain_name, 'RankName': rank_name} + return self.get_response(doc_path, 'DeleteRankExpression', + params, verb='POST') + + def describe_default_search_field(self, domain_name): + """ + Describes options defining the default search field used by + indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_default_search_field_response', + 'describe_default_search_field_result', + 'default_search_field') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeDefaultSearchField', + params, verb='POST') + + def describe_domains(self, domain_names=None): + """ + Describes the domains (optionally limited to one or more + domains by name) owned by this account. + + :type domain_names: list + :param domain_names: Limits the response to the specified domains. + + :raises: BaseException, InternalException + """ + doc_path = ('describe_domains_response', + 'describe_domains_result', + 'domain_status_list') + params = {} + if domain_names: + for i, domain_name in enumerate(domain_names, 1): + params['DomainNames.member.%d' % i] = domain_name + return self.get_response(doc_path, 'DescribeDomains', + params, verb='POST', + list_marker='DomainStatusList') + + def describe_index_fields(self, domain_name, field_names=None): + """ + Describes index fields in the search domain, optionally + limited to a single ``IndexField``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type field_names: list + :param field_names: Limits the response to the specified fields. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_index_fields_response', + 'describe_index_fields_result', + 'index_fields') + params = {'DomainName': domain_name} + if field_names: + for i, field_name in enumerate(field_names, 1): + params['FieldNames.member.%d' % i] = field_name + return self.get_response(doc_path, 'DescribeIndexFields', + params, verb='POST', + list_marker='IndexFields') + + def describe_rank_expressions(self, domain_name, rank_names=None): + """ + Describes RankExpressions in the search domain, optionally + limited to a single expression. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. 
+ + :type rank_names: list + :param rank_names: Limit response to the specified rank names. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_rank_expressions_response', + 'describe_rank_expressions_result', + 'rank_expressions') + params = {'DomainName': domain_name} + if rank_names: + for i, rank_name in enumerate(rank_names, 1): + params['RankNames.member.%d' % i] = rank_name + return self.get_response(doc_path, 'DescribeRankExpressions', + params, verb='POST', + list_marker='RankExpressions') + + def describe_service_access_policies(self, domain_name): + """ + Describes the resource-based policies controlling access to + the services in this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_service_access_policies_response', + 'describe_service_access_policies_result', + 'access_policies') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeServiceAccessPolicies', + params, verb='POST') + + def describe_stemming_options(self, domain_name): + """ + Describes stemming options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_stemming_options_response', + 'describe_stemming_options_result', + 'stems') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeStemmingOptions', + params, verb='POST') + + def describe_stopword_options(self, domain_name): + """ + Describes stopword options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_stopword_options_response', + 'describe_stopword_options_result', + 'stopwords') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeStopwordOptions', + params, verb='POST') + + def describe_synonym_options(self, domain_name): + """ + Describes synonym options used by indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). 
Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('describe_synonym_options_response', + 'describe_synonym_options_result', + 'synonyms') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'DescribeSynonymOptions', + params, verb='POST') + + def index_documents(self, domain_name): + """ + Tells the search domain to start scanning its documents using + the latest text processing options and ``IndexFields``. This + operation must be invoked to make visible in searches any + options whose OptionStatus has ``OptionState`` of + ``RequiresIndexDocuments``. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :raises: BaseException, InternalException, ResourceNotFoundException + """ + doc_path = ('index_documents_response', + 'index_documents_result', + 'field_names') + params = {'DomainName': domain_name} + return self.get_response(doc_path, 'IndexDocuments', params, + verb='POST', list_marker='FieldNames') + + def update_default_search_field(self, domain_name, default_search_field): + """ + Updates options defining the default search field used by + indexing for the search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type default_search_field: string + :param default_search_field: The IndexField to use for search + requests issued with the q parameter. The default is an + empty string, which automatically searches all text + fields. + + :raises: BaseException, InternalException, InvalidTypeException, + ResourceNotFoundException + """ + doc_path = ('update_default_search_field_response', + 'update_default_search_field_result', + 'default_search_field') + params = {'DomainName': domain_name, + 'DefaultSearchField': default_search_field} + return self.get_response(doc_path, 'UpdateDefaultSearchField', + params, verb='POST') + + def update_service_access_policies(self, domain_name, access_policies): + """ + Updates the policies controlling access to the services in + this search domain. + + :type domain_name: string + :param domain_name: A string that represents the name of a + domain. Domain names must be unique across the domains + owned by an account within an AWS region. Domain names + must start with a letter or number and can contain the + following characters: a-z (lowercase), 0-9, and - + (hyphen). Uppercase letters and underscores are not + allowed. + + :type access_policies: string + :param access_policies: An IAM access policy as described in + The Access Policy Language in Using AWS Identity and + Access Management. The maximum size of an access policy + document is 100KB. 
+
+        :raises: BaseException, InternalException, LimitExceededException,
+            ResourceNotFoundException, InvalidTypeException
+        """
+        doc_path = ('update_service_access_policies_response',
+                    'update_service_access_policies_result',
+                    'access_policies')
+        params = {'AccessPolicies': access_policies,
+                  'DomainName': domain_name}
+        return self.get_response(doc_path, 'UpdateServiceAccessPolicies',
+                                 params, verb='POST')
+
+    def update_stemming_options(self, domain_name, stems):
+        """
+        Updates stemming options used by indexing for the search domain.
+
+        :type domain_name: string
+        :param domain_name: A string that represents the name of a
+            domain. Domain names must be unique across the domains
+            owned by an account within an AWS region. Domain names
+            must start with a letter or number and can contain the
+            following characters: a-z (lowercase), 0-9, and -
+            (hyphen). Uppercase letters and underscores are not
+            allowed.
+
+        :type stems: string
+        :param stems: Maps terms to their stems. The JSON object
+            has a single key called "stems" whose value is a
+            dict mapping terms to their stems. The maximum size
+            of a stemming document is 500KB.
+            Example: {"stems":{"people": "person", "walking":"walk"}}
+
+        :raises: BaseException, InternalException, InvalidTypeException,
+            LimitExceededException, ResourceNotFoundException
+        """
+        doc_path = ('update_stemming_options_response',
+                    'update_stemming_options_result',
+                    'stems')
+        params = {'DomainName': domain_name,
+                  'Stems': stems}
+        return self.get_response(doc_path, 'UpdateStemmingOptions',
+                                 params, verb='POST')
+
+    def update_stopword_options(self, domain_name, stopwords):
+        """
+        Updates stopword options used by indexing for the search domain.
+
+        :type domain_name: string
+        :param domain_name: A string that represents the name of a
+            domain. Domain names must be unique across the domains
+            owned by an account within an AWS region. Domain names
+            must start with a letter or number and can contain the
+            following characters: a-z (lowercase), 0-9, and -
+            (hyphen). Uppercase letters and underscores are not
+            allowed.
+
+        :type stopwords: string
+        :param stopwords: Lists stopwords in a JSON object. The object has a
+            single key called "stopwords" whose value is an array of strings.
+            The maximum size of a stopwords document is 10KB. Example:
+            {"stopwords": ["a", "an", "the", "of"]}
+
+        :raises: BaseException, InternalException, InvalidTypeException,
+            LimitExceededException, ResourceNotFoundException
+        """
+        doc_path = ('update_stopword_options_response',
+                    'update_stopword_options_result',
+                    'stopwords')
+        params = {'DomainName': domain_name,
+                  'Stopwords': stopwords}
+        return self.get_response(doc_path, 'UpdateStopwordOptions',
+                                 params, verb='POST')
+
+    def update_synonym_options(self, domain_name, synonyms):
+        """
+        Updates synonym options used by indexing for the search domain.
+
+        :type domain_name: string
+        :param domain_name: A string that represents the name of a
+            domain. Domain names must be unique across the domains
+            owned by an account within an AWS region. Domain names
+            must start with a letter or number and can contain the
+            following characters: a-z (lowercase), 0-9, and -
+            (hyphen). Uppercase letters and underscores are not
+            allowed.
+
+        :type synonyms: string
+        :param synonyms: Maps terms to their synonyms. The JSON object
+            has a single key "synonyms" whose value is a dict mapping terms
+            to their synonyms. Each synonym is a simple string or an
+            array of strings. The maximum size of a synonyms document
+            is 100KB.
Example: + {"synonyms": {"cat": ["feline", "kitten"], "puppy": "dog"}} + + :raises: BaseException, InternalException, InvalidTypeException, + LimitExceededException, ResourceNotFoundException + """ + doc_path = ('update_synonym_options_response', + 'update_synonym_options_result', + 'synonyms') + params = {'DomainName': domain_name, + 'Synonyms': synonyms} + return self.get_response(doc_path, 'UpdateSynonymOptions', + params, verb='POST') diff --git a/ext/boto/cloudsearch/layer2.py b/ext/boto/cloudsearch/layer2.py new file mode 100644 index 0000000000..b565d4b5b9 --- /dev/null +++ b/ext/boto/cloudsearch/layer2.py @@ -0,0 +1,75 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch.layer1 import Layer1 +from boto.cloudsearch.domain import Domain + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host=None, debug=0, session_token=None, region=None, + validate_certs=True): + self.layer1 = Layer1( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + host=host, + debug=debug, + security_token=session_token, + region=region, + validate_certs=validate_certs) + + def list_domains(self, domain_names=None): + """ + Return a list of :class:`boto.cloudsearch.domain.Domain` + objects for each domain defined in the current account. + """ + domain_data = self.layer1.describe_domains(domain_names) + return [Domain(self.layer1, data) for data in domain_data] + + def create_domain(self, domain_name): + """ + Create a new CloudSearch domain and return the corresponding + :class:`boto.cloudsearch.domain.Domain` object. 
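+
+        Example (hypothetical domain name):
+
+        >>> domain = Layer2().create_domain('demo-domain')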
+ """ + data = self.layer1.create_domain(domain_name) + return Domain(self.layer1, data) + + def lookup(self, domain_name): + """ + Lookup a single domain + :param domain_name: The name of the domain to look up + :type domain_name: str + + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch.domain.Domain` + """ + domains = self.list_domains(domain_names=[domain_name]) + if len(domains) > 0: + return domains[0] diff --git a/ext/boto/cloudsearch/optionstatus.py b/ext/boto/cloudsearch/optionstatus.py new file mode 100644 index 0000000000..dddda76f97 --- /dev/null +++ b/ext/boto/cloudsearch/optionstatus.py @@ -0,0 +1,248 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import time +from boto.compat import json + + +class OptionStatus(dict): + """ + Presents a combination of status field (defined below) which are + accessed as attributes and option values which are stored in the + native Python dictionary. In this class, the option values are + merged from a JSON object that is stored as the Option part of + the object. + + :ivar domain_name: The name of the domain this option is associated with. + :ivar create_date: A timestamp for when this option was created. + :ivar state: The state of processing a change to an option. + Possible values: + + * RequiresIndexDocuments: the option's latest value will not + be visible in searches until IndexDocuments has been called + and indexing is complete. + * Processing: the option's latest value is not yet visible in + all searches but is in the process of being activated. + * Active: the option's latest value is completely visible. + + :ivar update_date: A timestamp for when this option was updated. + :ivar update_version: A unique integer that indicates when this + option was last updated. + """ + + def __init__(self, domain, data=None, refresh_fn=None, save_fn=None): + self.domain = domain + self.refresh_fn = refresh_fn + self.save_fn = save_fn + self.refresh(data) + + def _update_status(self, status): + self.creation_date = status['creation_date'] + self.status = status['state'] + self.update_date = status['update_date'] + self.update_version = int(status['update_version']) + + def _update_options(self, options): + if options: + self.update(json.loads(options)) + + def refresh(self, data=None): + """ + Refresh the local state of the object. 
You can either pass + new state data in as the parameter ``data`` or, if that parameter + is omitted, the state data will be retrieved from CloudSearch. + """ + if not data: + if self.refresh_fn: + data = self.refresh_fn(self.domain.name) + if data: + self._update_status(data['status']) + self._update_options(data['options']) + + def to_json(self): + """ + Return the JSON representation of the options as a string. + """ + return json.dumps(self) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'CreationDate': + self.created = value + elif name == 'State': + self.state = value + elif name == 'UpdateDate': + self.updated = value + elif name == 'UpdateVersion': + self.update_version = int(value) + elif name == 'Options': + self.update_from_json_doc(value) + else: + setattr(self, name, value) + + def save(self): + """ + Write the current state of the local object back to the + CloudSearch service. + """ + if self.save_fn: + data = self.save_fn(self.domain.name, self.to_json()) + self.refresh(data) + + def wait_for_state(self, state): + """ + Performs polling of CloudSearch to wait for the ``state`` + of this object to change to the provided state. + """ + while self.state != state: + time.sleep(5) + self.refresh() + + +class IndexFieldStatus(OptionStatus): + + def _update_options(self, options): + self.update(options) + + def save(self): + pass + + +class RankExpressionStatus(IndexFieldStatus): + + pass + +class ServicePoliciesStatus(OptionStatus): + + def new_statement(self, arn, ip): + """ + Returns a new policy statement that will allow + access to the service described by ``arn`` by the + ip specified in ``ip``. + + :type arn: string + :param arn: The Amazon Resource Notation identifier for the + service you wish to provide access to. This would be + either the search service or the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + return { + "Effect":"Allow", + "Action":"*", # Docs say use GET, but denies unless * + "Resource": arn, + "Condition": { + "IpAddress": { + "aws:SourceIp": [ip] + } + } + } + + def _allow_ip(self, arn, ip): + if 'Statement' not in self: + s = self.new_statement(arn, ip) + self['Statement'] = [s] + self.save() + else: + add_statement = True + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + add_statement = False + condition = statement['Condition'][condition_name] + if ip not in condition['aws:SourceIp']: + condition['aws:SourceIp'].append(ip) + + if add_statement: + s = self.new_statement(arn, ip) + self['Statement'].append(s) + self.save() + + def allow_search_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.search_service_arn + self._allow_ip(arn, ip) + + def allow_doc_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. 
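+
+        Example (``policies`` is a hypothetical ``ServicePoliciesStatus``
+        instance):
+
+        >>> policies.allow_doc_ip('192.0.2.0/24')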
+ """ + arn = self.domain.doc_service_arn + self._allow_ip(arn, ip) + + def _disallow_ip(self, arn, ip): + if 'Statement' not in self: + return + need_update = False + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + condition = statement['Condition'][condition_name] + if ip in condition['aws:SourceIp']: + condition['aws:SourceIp'].remove(ip) + need_update = True + if need_update: + self.save() + + def disallow_search_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.search_service_arn + self._disallow_ip(arn, ip) + + def disallow_doc_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.doc_service_arn + self._disallow_ip(arn, ip) diff --git a/ext/boto/cloudsearch/search.py b/ext/boto/cloudsearch/search.py new file mode 100644 index 0000000000..70ea479bec --- /dev/null +++ b/ext/boto/cloudsearch/search.py @@ -0,0 +1,377 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
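+#
+# Typical usage (an illustrative sketch, assuming an existing Domain
+# object):
+#
+#     conn = SearchConnection(domain=domain)
+#     results = conn.search(q='tim', size=10)
+#     for doc in conn.get_all_hits(conn.build_query(q='tim')):
+#         print(doc)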
+# +from math import ceil +from boto.compat import json, map, six +import requests + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + pass + + +class SearchResults(object): + def __init__(self, **attrs): + self.rid = attrs['info']['rid'] + # self.doc_coverage_pct = attrs['info']['doc-coverage-pct'] + self.cpu_time_ms = attrs['info']['cpu-time-ms'] + self.time_ms = attrs['info']['time-ms'] + self.hits = attrs['hits']['found'] + self.docs = attrs['hits']['hit'] + self.start = attrs['hits']['start'] + self.rank = attrs['rank'] + self.match_expression = attrs['match-expr'] + self.query = attrs['query'] + self.search_service = attrs['search_service'] + + self.facets = {} + if 'facets' in attrs: + for (facet, values) in attrs['facets'].items(): + if 'constraints' in values: + self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints'])) + + self.num_pages_needed = ceil(self.hits / self.query.real_size) + + def __len__(self): + return len(self.docs) + + def __iter__(self): + return iter(self.docs) + + def next_page(self): + """Call Cloudsearch to get the next page of search results + + :rtype: :class:`boto.cloudsearch.search.SearchResults` + :return: the following page of search results + """ + if self.query.page <= self.num_pages_needed: + self.query.start += self.query.real_size + self.query.page += 1 + return self.search_service(self.query) + else: + raise StopIteration + + +class Query(object): + + RESULTS_PER_PAGE = 500 + + def __init__(self, q=None, bq=None, rank=None, + return_fields=None, size=10, + start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + + self.q = q + self.bq = bq + self.rank = rank or [] + self.return_fields = return_fields or [] + self.start = start + self.facet = facet or [] + self.facet_constraints = facet_constraints or {} + self.facet_sort = facet_sort or {} + self.facet_top_n = facet_top_n or {} + self.t = t or {} + self.page = 0 + self.update_size(size) + + def update_size(self, new_size): + self.size = new_size + self.real_size = Query.RESULTS_PER_PAGE if (self.size > + Query.RESULTS_PER_PAGE or self.size == 0) else self.size + + def to_params(self): + """Transform search parameters from instance properties to a dictionary + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.bq: + params['bq'] = self.bq + + if self.rank: + params['rank'] = ','.join(self.rank) + + if self.return_fields: + params['return-fields'] = ','.join(self.return_fields) + + if self.facet: + params['facet'] = ','.join(self.facet) + + if self.facet_constraints: + for k, v in six.iteritems(self.facet_constraints): + params['facet-%s-constraints' % k] = v + + if self.facet_sort: + for k, v in six.iteritems(self.facet_sort): + params['facet-%s-sort' % k] = v + + if self.facet_top_n: + for k, v in six.iteritems(self.facet_top_n): + params['facet-%s-top-n' % k] = v + + if self.t: + for k, v in six.iteritems(self.t): + params['t-%s' % k] = v + return params + + +class SearchConnection(object): + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + if not endpoint: + self.endpoint = domain.search_service_endpoint + + def build_query(self, q=None, bq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + return Query(q=q, bq=bq, 
rank=rank, return_fields=return_fields, + size=size, start=start, facet=facet, + facet_constraints=facet_constraints, + facet_sort=facet_sort, facet_top_n=facet_top_n, t=t) + + def search(self, q=None, bq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, facet_constraints=None, + facet_sort=None, facet_top_n=None, t=None): + """ + Send a query to CloudSearch + + Each search query should use at least the q or bq argument to specify + the search parameter. The other options are used to specify the + criteria of the search. + + :type q: string + :param q: A string to search the default search fields for. + + :type bq: string + :param bq: A string to perform a Boolean search. This can be used to + create advanced searches. + + :type rank: List of strings + :param rank: A list of fields or rank expressions used to order the + search results. A field can be reversed by using the - operator. + ``['-year', 'author']`` + + :type return_fields: List of strings + :param return_fields: A list of fields which should be returned by the + search. If this field is not specified, only IDs will be returned. + ``['headline']`` + + :type size: int + :param size: Number of search results to specify + + :type start: int + :param start: Offset of the first search result to return (can be used + for paging) + + :type facet: list + :param facet: List of fields for which facets should be returned + ``['colour', 'size']`` + + :type facet_constraints: dict + :param facet_constraints: Use to limit facets to specific values + specified as comma-delimited strings in a Dictionary of facets + ``{'colour': "'blue','white','red'", 'size': "big"}`` + + :type facet_sort: dict + :param facet_sort: Rules used to specify the order in which facet + values should be returned. Allowed values are *alpha*, *count*, + *max*, *sum*. Use *alpha* to sort alphabetical, and *count* to sort + the facet by number of available result. + ``{'color': 'alpha', 'size': 'count'}`` + + :type facet_top_n: dict + :param facet_top_n: Dictionary of facets and number of facets to + return. + ``{'colour': 2}`` + + :type t: dict + :param t: Specify ranges for specific fields + ``{'year': '2000..2005'}`` + + :rtype: :class:`boto.cloudsearch.search.SearchResults` + :return: Returns the results of this search + + The following examples all assume we have indexed a set of documents + with fields: *author*, *date*, *headline* + + A simple search will look for documents whose default text search + fields will contain the search word exactly: + + >>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy) + + A simple search with more keywords will return documents whose default + text search fields contain the search strings together or separately. + + >>> search(q='Tim apple') # Will match "tim" and "apple" + + More complex searches require the boolean search operator. + + Wildcard searches can be used to search for any words that start with + the search string. + + >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy) + + Search terms can also be combined. Allowed operators are "and", "or", + "not", "field", "optional", "token", "phrase", or "filter" + + >>> search(bq="(and 'Tim' (field author 'John Smith'))") + + Facets allow you to show classification information about the search + results. 
For example, you can retrieve the authors who have written + about Tim: + + >>> search(q='Tim', facet=['Author']) + + With facet_constraints, facet_top_n and facet_sort more complicated + constraints can be specified such as returning the top author out of + John Smith and Mark Smith who have a document with the word Tim in it. + + >>> search(q='Tim', + ... facet=['Author'], + ... facet_constraints={'author': "'John Smith','Mark Smith'"}, + ... facet=['author'], + ... facet_top_n={'author': 1}, + ... facet_sort={'author': 'count'}) + """ + + query = self.build_query(q=q, bq=bq, rank=rank, + return_fields=return_fields, + size=size, start=start, facet=facet, + facet_constraints=facet_constraints, + facet_sort=facet_sort, + facet_top_n=facet_top_n, t=t) + return self(query) + + def __call__(self, query): + """Make a call to CloudSearch + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: A group of search criteria + + :rtype: :class:`boto.cloudsearch.search.SearchResults` + :return: search results + """ + url = "http://%s/2011-02-01/search" % (self.endpoint) + params = query.to_params() + + r = requests.get(url, params=params) + body = r.content.decode('utf-8') + try: + data = json.loads(body) + except ValueError as e: + if r.status_code == 403: + msg = '' + import re + g = re.search('
<html><body><h1>403 Forbidden</h1>
    ([^<]+)<', body) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % body, query) + + if 'messages' in data and 'error' in data: + for m in data['messages']: + if m['severity'] == 'fatal': + raise SearchServiceException("Error processing search %s " + "=> %s" % (params, m['message']), query) + elif 'error' in data: + raise SearchServiceException("Unknown error processing search %s" + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self + + return SearchResults(**data) + + def get_all_paged(self, query, per_page): + """Get a generator to iterate over all pages of search results + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: A group of search criteria + + :type per_page: int + :param per_page: Number of docs in each :class:`boto.cloudsearch.search.SearchResults` object. + + :rtype: generator + :return: Generator containing :class:`boto.cloudsearch.search.SearchResults` + """ + query.update_size(per_page) + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + yield results + query.start += query.real_size + page += 1 + + def get_all_hits(self, query): + """Get a generator to iterate over all search results + + Transparently handles the results paging from Cloudsearch + search results so even if you have many thousands of results + you can iterate over all results in a reasonably efficient + manner. + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: A group of search criteria + + :rtype: generator + :return: All docs matching query + """ + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + for doc in results: + yield doc + query.start += query.real_size + page += 1 + + def get_num_hits(self, query): + """Return the total number of hits for query + + :type query: :class:`boto.cloudsearch.search.Query` + :param query: a group of search criteria + + :rtype: int + :return: Total number of hits for query + """ + query.update_size(1) + return self(query).hits + + + diff --git a/ext/boto/cloudsearch/sourceattribute.py b/ext/boto/cloudsearch/sourceattribute.py new file mode 100644 index 0000000000..2883314722 --- /dev/null +++ b/ext/boto/cloudsearch/sourceattribute.py @@ -0,0 +1,74 @@ +# Copyright (c) 202 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class SourceAttribute(object): + """ + Provide information about attributes for an index field. + A maximum of 20 source attributes can be configured for + each index field. + + :ivar default: Optional default value if the source attribute + is not specified in a document. + + :ivar name: The name of the document source field to add + to this ``IndexField``. + + :ivar data_function: Identifies the transformation to apply + when copying data from a source attribute. + + :ivar data_map: The value is a dict with the following keys: + * cases - A dict that translates source field values + to custom values. + * default - An optional default value to use if the + source attribute is not specified in a document. + * name - the name of the document source field to add + to this ``IndexField`` + :ivar data_trim_title: Trims common title words from a source + document attribute when populating an ``IndexField``. + This can be used to create an ``IndexField`` you can + use for sorting. The value is a dict with the following + fields: + * default - An optional default value. + * language - an IETF RFC 4646 language code. + * separator - The separator that follows the text to trim. + * name - The name of the document source field to add. + """ + + ValidDataFunctions = ('Copy', 'TrimTitle', 'Map') + + def __init__(self): + self.data_copy = {} + self._data_function = self.ValidDataFunctions[0] + self.data_map = {} + self.data_trim_title = {} + + @property + def data_function(self): + return self._data_function + + @data_function.setter + def data_function(self, value): + if value not in self.ValidDataFunctions: + valid = '|'.join(self.ValidDataFunctions) + raise ValueError('data_function must be one of: %s' % valid) + self._data_function = value diff --git a/ext/boto/cloudsearch2/__init__.py b/ext/boto/cloudsearch2/__init__.py new file mode 100644 index 0000000000..14e790abe5 --- /dev/null +++ b/ext/boto/cloudsearch2/__init__.py @@ -0,0 +1,39 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from boto.regioninfo import get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch service. 
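+
+    Region objects are resolved via ``boto.regioninfo.get_regions`` using
+    the ``cloudsearch`` service name, with CloudSearchConnection as the
+    connection class.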
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudsearch2.layer1 import CloudSearchConnection + return get_regions('cloudsearch', connection_cls=CloudSearchConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cloudsearch2.layer1 import CloudSearchConnection + return connect('cloudsearch', region_name, + connection_cls=CloudSearchConnection, **kw_params) diff --git a/ext/boto/cloudsearch2/document.py b/ext/boto/cloudsearch2/document.py new file mode 100644 index 0000000000..cfe5fe623d --- /dev/null +++ b/ext/boto/cloudsearch2/document.py @@ -0,0 +1,315 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto.exception +from boto.compat import json +import requests +import boto +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + + +class SearchServiceException(Exception): + pass + + +class CommitMismatchError(Exception): + # Let's do some extra work and let the user handle errors on his/her own. + + errors = None + + +class EncodingError(Exception): + """ + Content sent for Cloud Search indexing was incorrectly encoded. + + This usually happens when a document is marked as unicode but non-unicode + characters are present. + """ + pass + + +class ContentTooLongError(Exception): + """ + Content sent for Cloud Search indexing was too long + + This will usually happen when documents queued for indexing add up to more + than the limit allowed per upload batch (5MB) + + """ + pass + + +class DocumentServiceConnection(object): + """ + A CloudSearch document service. + + The DocumentServiceConection is used to add, remove and update documents in + CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document + Format). + + To generate an appropriate SDF, use :func:`add` to add or update documents, + as well as :func:`delete` to remove documents. + + Once the set of documents is ready to be index, use :func:`commit` to send + the commands to CloudSearch. + + If there are a lot of documents to index, it may be preferable to split the + generation of SDF data and the actual uploading into CloudSearch. Retrieve + the current SDF with :func:`get_sdf`. If this file is the uploaded into S3, + it can be retrieved back afterwards for upload into CloudSearch using + :func:`add_sdf_from_s3`. + + The SDF is not cleared after a :func:`commit`. 
If you wish to continue + using the DocumentServiceConnection for another batch upload of commands, + you will need to :func:`clear_sdf` first to stop the previous batch of + commands from being uploaded again. + + """ + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + if not self.endpoint: + self.endpoint = domain.doc_service_endpoint + self.documents_batch = [] + self._sdf = None + + # Copy proxy settings from connection and check if request should be signed + self.proxy = {} + self.sign_request = False + if self.domain and self.domain.layer1: + if self.domain.layer1.use_proxy: + self.proxy = {'http': self.domain.layer1.get_proxy_url_with_auth()} + + self.sign_request = getattr(self.domain.layer1, 'sign_request', False) + + if self.sign_request: + # Create a domain connection to send signed requests + layer1 = self.domain.layer1 + self.domain_connection = CloudSearchDomainConnection( + host=self.endpoint, + aws_access_key_id=layer1.aws_access_key_id, + aws_secret_access_key=layer1.aws_secret_access_key, + region=layer1.region, + provider=layer1.provider + ) + + def add(self, _id, fields): + """ + Add a document to be processed by the DocumentService + + The document will not actually be added until :func:`commit` is called + + :type _id: string + :param _id: A unique ID used to refer to this document. + + :type fields: dict + :param fields: A dictionary of key-value pairs to be uploaded . + """ + + d = {'type': 'add', 'id': _id, 'fields': fields} + self.documents_batch.append(d) + + def delete(self, _id): + """ + Schedule a document to be removed from the CloudSearch service + + The document will not actually be scheduled for removal until + :func:`commit` is called + + :type _id: string + :param _id: The unique ID of this document. + """ + + d = {'type': 'delete', 'id': _id} + self.documents_batch.append(d) + + def get_sdf(self): + """ + Generate the working set of documents in Search Data Format (SDF) + + :rtype: string + :returns: JSON-formatted string of the documents in SDF + """ + + return self._sdf if self._sdf else json.dumps(self.documents_batch) + + def clear_sdf(self): + """ + Clear the working documents from this DocumentServiceConnection + + This should be used after :func:`commit` if the connection will be + reused for another set of documents. + """ + + self._sdf = None + self.documents_batch = [] + + def add_sdf_from_s3(self, key_obj): + """ + Load an SDF from S3 + + Using this method will result in documents added through + :func:`add` and :func:`delete` being ignored. + + :type key_obj: :class:`boto.s3.key.Key` + :param key_obj: An S3 key which contains an SDF + """ + #@todo:: (lucas) would be nice if this could just take an s3://uri..." + + self._sdf = key_obj.get_contents_as_string() + + def _commit_with_auth(self, sdf, api_version): + return self.domain_connection.upload_documents(sdf, 'application/json') + + def _commit_without_auth(self, sdf, api_version): + url = "http://%s/%s/documents/batch" % (self.endpoint, api_version) + + # Keep-alive is automatic in a post-1.0 requests world. 
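+        # A pooled HTTPAdapter (20 pools, up to 50 connections per pool,
+        # 5 retries) is mounted below so that large batch uploads reuse
+        # sockets instead of reconnecting on every request.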
+ session = requests.Session() + session.proxies = self.proxy + adapter = requests.adapters.HTTPAdapter( + pool_connections=20, + pool_maxsize=50, + max_retries=5 + ) + session.mount('http://', adapter) + session.mount('https://', adapter) + + resp = session.post(url, data=sdf, headers={'Content-Type': 'application/json'}) + return resp + + def commit(self): + """ + Actually send an SDF to CloudSearch for processing + + If an SDF file has been explicitly loaded it will be used. Otherwise, + documents added through :func:`add` and :func:`delete` will be used. + + :rtype: :class:`CommitResponse` + :returns: A summary of documents added and deleted + """ + + sdf = self.get_sdf() + + if ': null' in sdf: + boto.log.error('null value in sdf detected. This will probably ' + 'raise 500 error.') + index = sdf.index(': null') + boto.log.error(sdf[index - 100:index + 100]) + + api_version = '2013-01-01' + if self.domain and self.domain.layer1: + api_version = self.domain.layer1.APIVersion + + if self.sign_request: + r = self._commit_with_auth(sdf, api_version) + else: + r = self._commit_without_auth(sdf, api_version) + + return CommitResponse(r, self, sdf, signed_request=self.sign_request) + + +class CommitResponse(object): + """Wrapper for response to Cloudsearch document batch commit. + + :type response: :class:`requests.models.Response` + :param response: Response from Cloudsearch /documents/batch API + + :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection` + :param doc_service: Object containing the documents posted and methods to + retry + + :raises: :class:`boto.exception.BotoServerError` + :raises: :class:`boto.cloudsearch2.document.SearchServiceException` + :raises: :class:`boto.cloudsearch2.document.EncodingError` + :raises: :class:`boto.cloudsearch2.document.ContentTooLongError` + """ + def __init__(self, response, doc_service, sdf, signed_request=False): + self.response = response + self.doc_service = doc_service + self.sdf = sdf + self.signed_request = signed_request + + if self.signed_request: + self.content = response + else: + _body = response.content.decode('utf-8') + + try: + self.content = json.loads(_body) + except: + boto.log.error('Error indexing documents.\nResponse Content:\n{0}' + '\n\nSDF:\n{1}'.format(_body, self.sdf)) + raise boto.exception.BotoServerError(self.response.status_code, '', + body=_body) + + self.status = self.content['status'] + if self.status == 'error': + self.errors = [e.get('message') for e in self.content.get('errors', + [])] + for e in self.errors: + if "Illegal Unicode character" in e: + raise EncodingError("Illegal Unicode character in document") + elif e == "The Content-Length is too long": + raise ContentTooLongError("Content was too long") + else: + self.errors = [] + + self.adds = self.content['adds'] + self.deletes = self.content['deletes'] + self._check_num_ops('add', self.adds) + self._check_num_ops('delete', self.deletes) + + def _check_num_ops(self, type_, response_num): + """Raise exception if number of ops in response doesn't match commit + + :type type_: str + :param type_: Type of commit operation: 'add' or 'delete' + + :type response_num: int + :param response_num: Number of adds or deletes in the response. 
+ + :raises: :class:`boto.cloudsearch2.document.CommitMismatchError` + """ + commit_num = len([d for d in self.doc_service.documents_batch + if d['type'] == type_]) + + if response_num != commit_num: + if self.signed_request: + boto.log.debug(self.response) + else: + boto.log.debug(self.response.content) + # There will always be a commit mismatch error if there is any + # errors on cloudsearch. self.errors gets lost when this + # CommitMismatchError is raised. Whoever is using boto has no idea + # why their commit failed. They can't even notify the user of the + # cause by parsing the error messages from amazon. So let's + # attach the self.errors to the exceptions if we already spent + # time and effort collecting them out of the response. + exc = CommitMismatchError( + 'Incorrect number of {0}s returned. Commit: {1} Response: {2}' + .format(type_, commit_num, response_num) + ) + exc.errors = self.errors + raise exc diff --git a/ext/boto/cloudsearch2/domain.py b/ext/boto/cloudsearch2/domain.py new file mode 100644 index 0000000000..956af216d8 --- /dev/null +++ b/ext/boto/cloudsearch2/domain.py @@ -0,0 +1,542 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch2.optionstatus import IndexFieldStatus +from boto.cloudsearch2.optionstatus import ServicePoliciesStatus +from boto.cloudsearch2.optionstatus import ExpressionStatus +from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus +from boto.cloudsearch2.optionstatus import ScalingParametersStatus +from boto.cloudsearch2.document import DocumentServiceConnection +from boto.cloudsearch2.search import SearchConnection + + +def handle_bool(value): + if value in [True, 'true', 'True', 'TRUE', 1]: + return True + return False + + +class Domain(object): + """ + A Cloudsearch domain. + + :ivar name: The name of the domain. + + :ivar id: The internally generated unique identifier for the domain. + + :ivar created: A boolean which is True if the domain is + created. It can take several minutes to initialize a domain + when CreateDomain is called. Newly created search domains are + returned with a False value for Created until domain creation + is complete + + :ivar deleted: A boolean which is True if the search domain has + been deleted. The system must clean up resources dedicated to + the search domain when delete is called. 
Newly deleted + search domains are returned from list_domains with a True + value for deleted for several minutes until resource cleanup + is complete. + + :ivar processing: True if processing is being done to activate the + current domain configuration. + + :ivar num_searchable_docs: The number of documents that have been + submittted to the domain and indexed. + + :ivar requires_index_document: True if index_documents needs to be + called to activate the current domain configuration. + + :ivar search_instance_count: The number of search instances that are + available to process search requests. + + :ivar search_instance_type: The instance type that is being used to + process search requests. + + :ivar search_partition_count: The number of partitions across which + the search index is spread. + """ + + def __init__(self, layer1, data): + """ + Constructor - Create a domain object from a layer1 and data params + + :type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object + :param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object + which is used to perform operations on the domain. + """ + self.layer1 = layer1 + self.update_from_data(data) + + def update_from_data(self, data): + self.created = data['Created'] + self.deleted = data['Deleted'] + self.processing = data['Processing'] + self.requires_index_documents = data['RequiresIndexDocuments'] + self.domain_id = data['DomainId'] + self.domain_name = data['DomainName'] + self.search_instance_count = data['SearchInstanceCount'] + self.search_instance_type = data.get('SearchInstanceType', None) + self.search_partition_count = data['SearchPartitionCount'] + self._doc_service = data['DocService'] + self._service_arn = data['ARN'] + self._search_service = data['SearchService'] + + @property + def service_arn(self): + return self._service_arn + + @property + def doc_service_endpoint(self): + return self._doc_service['Endpoint'] + + @property + def search_service_endpoint(self): + return self._search_service['Endpoint'] + + @property + def created(self): + return self._created + + @created.setter + def created(self, value): + self._created = handle_bool(value) + + @property + def deleted(self): + return self._deleted + + @deleted.setter + def deleted(self, value): + self._deleted = handle_bool(value) + + @property + def processing(self): + return self._processing + + @processing.setter + def processing(self, value): + self._processing = handle_bool(value) + + @property + def requires_index_documents(self): + return self._requires_index_documents + + @requires_index_documents.setter + def requires_index_documents(self, value): + self._requires_index_documents = handle_bool(value) + + @property + def search_partition_count(self): + return self._search_partition_count + + @search_partition_count.setter + def search_partition_count(self, value): + self._search_partition_count = int(value) + + @property + def search_instance_count(self): + return self._search_instance_count + + @search_instance_count.setter + def search_instance_count(self, value): + self._search_instance_count = int(value) + + @property + def name(self): + return self.domain_name + + @property + def id(self): + return self.domain_id + + def delete(self): + """ + Delete this domain and all index data associated with it. + """ + return self.layer1.delete_domain(self.name) + + def get_analysis_schemes(self): + """ + Return a list of Analysis Scheme objects. 
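+
+        Example (assumes ``domain`` is an existing Domain instance):
+
+        >>> schemes = domain.get_analysis_schemes()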
+ """ + return self.layer1.describe_analysis_schemes(self.name) + + def get_availability_options(self): + """ + Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object representing the currently defined availability options for + the domain. + :return: OptionsStatus object + :rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus` + object + """ + return AvailabilityOptionsStatus( + self, refresh_fn=self.layer1.describe_availability_options, + refresh_key=['DescribeAvailabilityOptionsResponse', + 'DescribeAvailabilityOptionsResult', + 'AvailabilityOptions'], + save_fn=self.layer1.update_availability_options) + + def get_scaling_options(self): + """ + Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object representing the currently defined scaling options for the + domain. + :return: ScalingParametersStatus object + :rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus` + object + """ + return ScalingParametersStatus( + self, refresh_fn=self.layer1.describe_scaling_parameters, + refresh_key=['DescribeScalingParametersResponse', + 'DescribeScalingParametersResult', + 'ScalingParameters'], + save_fn=self.layer1.update_scaling_parameters) + + def get_access_policies(self): + """ + Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus` + object representing the currently defined access policies for the + domain. + :return: ServicePoliciesStatus object + :rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object + """ + return ServicePoliciesStatus( + self, refresh_fn=self.layer1.describe_service_access_policies, + refresh_key=['DescribeServiceAccessPoliciesResponse', + 'DescribeServiceAccessPoliciesResult', + 'AccessPolicies'], + save_fn=self.layer1.update_service_access_policies) + + def index_documents(self): + """ + Tells the search domain to start indexing its documents using + the latest text processing options and IndexFields. This + operation must be invoked to make options whose OptionStatus + has OptionState of RequiresIndexDocuments visible in search + results. + """ + self.layer1.index_documents(self.name) + + def get_index_fields(self, field_names=None): + """ + Return a list of index fields defined for this domain. + :return: list of IndexFieldStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus` + object + """ + data = self.layer1.describe_index_fields(self.name, field_names) + + data = (data['DescribeIndexFieldsResponse'] + ['DescribeIndexFieldsResult'] + ['IndexFields']) + + return [IndexFieldStatus(self, d) for d in data] + + def create_index_field(self, field_name, field_type, + default='', facet=False, returnable=False, + searchable=False, sortable=False, + highlight=False, source_field=None, + analysis_scheme=None): + """ + Defines an ``IndexField``, either replacing an existing + definition or creating a new one. + + :type field_name: string + :param field_name: The name of a field in the search index. + + :type field_type: string + :param field_type: The type of field. Valid values are + int | double | literal | text | date | latlon | + int-array | double-array | literal-array | text-array | date-array + + :type default: string or int + :param default: The default value for the field. If the + field is of type ``int`` this should be an integer value. + Otherwise, it's a string. + + :type facet: bool + :param facet: A boolean to indicate whether facets + are enabled for this field or not. 
Does not apply to + fields of type ``int, int-array, text, text-array``. + + :type returnable: bool + :param returnable: A boolean to indicate whether values + of this field can be returned in search results or + used in ranking. + + :type searchable: bool + :param searchable: A boolean to indicate whether search + is enabled for this field or not. + + :type sortable: bool + :param sortable: A boolean to indicate whether sorting + is enabled for this field or not. Does not apply to + fields of array types. + + :type highlight: bool + :param highlight: A boolean to indicate whether highlighting + is enabled for this field or not. Does not apply to + fields of type ``double, int, date, latlon`` + + :type source_field: list of strings or string + :param source_field: For array types, this is the list of fields + to treat as the source. For singular types, pass a string only. + + :type analysis_scheme: string + :param analysis_scheme: The analysis scheme to use for this field. + Only applies to ``text | text-array`` field types + + :return: IndexFieldStatus objects + :rtype: :class:`boto.cloudsearch2.option.IndexFieldStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + index = { + 'IndexFieldName': field_name, + 'IndexFieldType': field_type + } + if field_type == 'literal': + index['LiteralOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LiteralOptions']['DefaultValue'] = default + if source_field: + index['LiteralOptions']['SourceField'] = source_field + elif field_type == 'literal-array': + index['LiteralArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['LiteralArrayOptions']['DefaultValue'] = default + if source_field: + index['LiteralArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'int': + index['IntOptions'] = { + 'DefaultValue': default, + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['IntOptions']['DefaultValue'] = default + if source_field: + index['IntOptions']['SourceField'] = source_field + elif field_type == 'int-array': + index['IntArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['IntArrayOptions']['DefaultValue'] = default + if source_field: + index['IntArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'date': + index['DateOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DateOptions']['DefaultValue'] = default + if source_field: + index['DateOptions']['SourceField'] = source_field + elif field_type == 'date-array': + index['DateArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DateArrayOptions']['DefaultValue'] = default + if source_field: + index['DateArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'double': + index['DoubleOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['DoubleOptions']['DefaultValue'] = default + if source_field: + 
index['DoubleOptions']['SourceField'] = source_field + elif field_type == 'double-array': + index['DoubleArrayOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable + } + if default: + index['DoubleArrayOptions']['DefaultValue'] = default + if source_field: + index['DoubleArrayOptions']['SourceFields'] = \ + ','.join(source_field) + elif field_type == 'text': + index['TextOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight, + 'SortEnabled': sortable + } + if default: + index['TextOptions']['DefaultValue'] = default + if source_field: + index['TextOptions']['SourceField'] = source_field + if analysis_scheme: + index['TextOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'text-array': + index['TextArrayOptions'] = { + 'ReturnEnabled': returnable, + 'HighlightEnabled': highlight + } + if default: + index['TextArrayOptions']['DefaultValue'] = default + if source_field: + index['TextArrayOptions']['SourceFields'] = \ + ','.join(source_field) + if analysis_scheme: + index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme + elif field_type == 'latlon': + index['LatLonOptions'] = { + 'FacetEnabled': facet, + 'ReturnEnabled': returnable, + 'SearchEnabled': searchable, + 'SortEnabled': sortable + } + if default: + index['LatLonOptions']['DefaultValue'] = default + if source_field: + index['LatLonOptions']['SourceField'] = source_field + + data = self.layer1.define_index_field(self.name, index) + + data = (data['DefineIndexFieldResponse'] + ['DefineIndexFieldResult'] + ['IndexField']) + + return IndexFieldStatus(self, data, + self.layer1.describe_index_fields) + + def get_expressions(self, names=None): + """ + Return a list of rank expressions defined for this domain. + :return: list of ExpressionStatus objects + :rtype: list of :class:`boto.cloudsearch2.option.ExpressionStatus` + object + """ + fn = self.layer1.describe_expressions + data = fn(self.name, names) + + data = (data['DescribeExpressionsResponse'] + ['DescribeExpressionsResult'] + ['Expressions']) + + return [ExpressionStatus(self, d, fn) for d in data] + + def create_expression(self, name, value): + """ + Create a new expression. + + :type name: string + :param name: The name of an expression for processing + during a search request. + + :type value: string + :param value: The expression to evaluate for ranking + or thresholding while processing a search request. The + Expression syntax is based on JavaScript expressions + and supports: + + * Single value, sort enabled numeric fields (int, double, date) + * Other expressions + * The _score variable, which references a document's relevance + score + * The _time variable, which references the current epoch time + * Integer, floating point, hex, and octal literals + * Arithmetic operators: + - * / % + * Bitwise operators: | & ^ ~ << >> >>> + * Boolean operators (including the ternary operator): && || ! ?: + * Comparison operators: < <= == >= > + * Mathematical functions: abs ceil exp floor ln log2 log10 logn + max min pow sqrt pow + * Trigonometric functions: acos acosh asin asinh atan atan2 atanh + cos cosh sin sinh tanh tan + * The haversin distance function + + Expressions always return an integer value from 0 to the maximum + 64-bit signed integer value (2^63 - 1). Intermediate results are + calculated as double-precision floating point values and the return + value is rounded to the nearest integer. If the expression is + invalid or evaluates to a negative value, it returns 0. 
If the + expression evaluates to a value greater than the maximum, it + returns the maximum value. + + The source data for an Expression can be the name of an + IndexField of type int or double, another Expression or the + reserved name _score. The _score source is + defined to return as a double from 0 to 10.0 (inclusive) to + indicate how relevant a document is to the search request, + taking into account repetition of search terms in the + document and proximity of search terms to each other in + each matching IndexField in the document. + + For more information about using rank expressions to + customize ranking, see the Amazon CloudSearch Developer + Guide. + + :return: ExpressionStatus object + :rtype: :class:`boto.cloudsearch2.option.ExpressionStatus` object + + :raises: BaseException, InternalException, LimitExceededException, + InvalidTypeException, ResourceNotFoundException + """ + data = self.layer1.define_expression(self.name, name, value) + + data = (data['DefineExpressionResponse'] + ['DefineExpressionResult'] + ['Expression']) + + return ExpressionStatus(self, data, + self.layer1.describe_expressions) + + def get_document_service(self): + return DocumentServiceConnection(domain=self) + + def get_search_service(self): + return SearchConnection(domain=self) + + def __repr__(self): + return '' % self.domain_name diff --git a/ext/boto/cloudsearch2/exceptions.py b/ext/boto/cloudsearch2/exceptions.py new file mode 100644 index 0000000000..c114113963 --- /dev/null +++ b/ext/boto/cloudsearch2/exceptions.py @@ -0,0 +1,46 @@ +""" +Exceptions that are specific to the cloudsearch2 module. +""" +from boto.exception import BotoServerError + + +class InvalidTypeException(BotoServerError): + """ + Raised when an invalid record type is passed to CloudSearch. + """ + pass + + +class LimitExceededException(BotoServerError): + """ + Raised when a limit has been exceeded. + """ + pass + + +class InternalException(BotoServerError): + """ + A generic server-side error. + """ + pass + + +class DisabledOperationException(BotoServerError): + """ + Raised when an operation has been disabled. + """ + pass + + +class ResourceNotFoundException(BotoServerError): + """ + Raised when a requested resource does not exist. + """ + pass + + +class BaseException(BotoServerError): + """ + A generic server-side error. + """ + pass diff --git a/ext/boto/cloudsearch2/layer1.py b/ext/boto/cloudsearch2/layer1.py new file mode 100644 index 0000000000..a39b08f585 --- /dev/null +++ b/ext/boto/cloudsearch2/layer1.py @@ -0,0 +1,783 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudsearch2 import exceptions + + +class CloudSearchConnection(AWSQueryConnection): + """ + Amazon CloudSearch Configuration Service + You use the Amazon CloudSearch configuration service to create, + configure, and manage search domains. Configuration service + requests are submitted using the AWS Query protocol. AWS Query + requests are HTTP or HTTPS requests submitted via HTTP GET or POST + with a query parameter named Action. + + The endpoint for configuration service requests is region- + specific: cloudsearch. region .amazonaws.com. For example, + cloudsearch.us-east-1.amazonaws.com. For a current list of + supported regions and endpoints, see `Regions and Endpoints`_. + """ + APIVersion = "2013-01-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidTypeException": exceptions.InvalidTypeException, + "LimitExceededException": exceptions.LimitExceededException, + "InternalException": exceptions.InternalException, + "DisabledOperationException": exceptions.DisabledOperationException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "BaseException": exceptions.BaseException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + sign_request = kwargs.pop('sign_request', False) + self.sign_request = sign_request + + super(CloudSearchConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_suggesters(self, domain_name): + """ + Indexes the search suggestions. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='BuildSuggesters', + verb='POST', + path='/', params=params) + + def create_domain(self, domain_name): + """ + Creates a new search domain. For more information, see + `Creating a Search Domain`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A name for the domain you are creating. Allowed + characters are a-z (lower-case letters), 0-9, and hyphen (-). + Domain names must start with a letter or number and be at least 3 + and no more than 28 characters long. + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='CreateDomain', + verb='POST', + path='/', params=params) + + def define_analysis_scheme(self, domain_name, analysis_scheme): + """ + Configures an analysis scheme that can be applied to a `text` + or `text-array` field to define language-specific text + processing options. 
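Before the individual actions that follow, it may help to see the connection in use. A minimal sketch, assuming placeholder credentials and domain name; the nested Response/Result unwrapping matches the JSON shape this class returns:

    from boto.cloudsearch2.layer1 import CloudSearchConnection

    conn = CloudSearchConnection(aws_access_key_id='<key-id>',
                                 aws_secret_access_key='<secret>')
    # Each action returns the decoded JSON body (ContentType=JSON).
    result = conn.create_domain('demo-domain')
    status = (result['CreateDomainResponse']
                    ['CreateDomainResult']
                    ['DomainStatus'])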
For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme: dict + :param analysis_scheme: Configuration information for an analysis + scheme. Each analysis scheme has a unique name and specifies the + language of the text to be processed. The following options can be + configured for an analysis scheme: `Synonyms`, `Stopwords`, + `StemmingDictionary`, and `AlgorithmicStemming`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'AnalysisScheme', + analysis_scheme) + return self._make_request( + action='DefineAnalysisScheme', + verb='POST', + path='/', params=params) + + def define_expression(self, domain_name, expression): + """ + Configures an `Expression` for the search domain. Used to + create new expressions and modify existing ones. If the + expression exists, the new configuration replaces the old one. + For more information, see `Configuring Expressions`_ in the + Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression: dict + :param expression: A named expression that can be evaluated at search + time. Can be used to sort the search results, define other + expressions, or return computed information in the search results. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Expression', + expression) + return self._make_request( + action='DefineExpression', + verb='POST', + path='/', params=params) + + def define_index_field(self, domain_name, index_field): + """ + Configures an `IndexField` for the search domain. Used to + create new fields and modify existing ones. You must specify + the name of the domain you are configuring and an index field + configuration. The index field configuration specifies a + unique name, the index field type, and the options you want to + configure for the field. The options you can specify depend on + the `IndexFieldType`. If the field exists, the new + configuration replaces the old one. For more information, see + `Configuring Index Fields`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field: dict + :param index_field: The index field and field options you want to + configure. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'IndexField', + index_field) + return self._make_request( + action='DefineIndexField', + verb='POST', + path='/', params=params) + + def define_suggester(self, domain_name, suggester): + """ + Configures a suggester for a domain. 
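As a rough sketch of the dict `define_index_field` expects, reusing `conn` from the sketch above; the field name is a placeholder and the option keys mirror the `build_complex_param` serialization example later in this file:

    index_field = {
        'IndexFieldName': 'year',
        'IndexFieldType': 'int',
        'IntOptions': {'DefaultValue': 2000},
    }
    # Serialized as IndexField.IndexFieldName=year,
    # IndexField.IndexFieldType=int and
    # IndexField.IntOptions.DefaultValue=2000.
    conn.define_index_field('demo-domain', index_field)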
A suggester enables you + to display possible matches before users finish typing their + queries. When you configure a suggester, you must specify the + name of the text field you want to search for possible matches + and a unique name for the suggester. For more information, see + `Getting Search Suggestions`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester: dict + :param suggester: Configuration information for a search suggester. + Each suggester has a unique name and specifies the text field you + want to use for suggestions. The following options can be + configured for a suggester: `FuzzyMatching`, `SortExpression`. + + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'Suggester', + suggester) + return self._make_request( + action='DefineSuggester', + verb='POST', + path='/', params=params) + + def delete_analysis_scheme(self, domain_name, analysis_scheme_name): + """ + Deletes an analysis scheme. For more information, see + `Configuring Analysis Schemes`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type analysis_scheme_name: string + :param analysis_scheme_name: The name of the analysis scheme you want + to delete. + + """ + params = { + 'DomainName': domain_name, + 'AnalysisSchemeName': analysis_scheme_name, + } + return self._make_request( + action='DeleteAnalysisScheme', + verb='POST', + path='/', params=params) + + def delete_domain(self, domain_name): + """ + Permanently deletes a search domain and all of its data. Once + a domain has been deleted, it cannot be recovered. For more + information, see `Deleting a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to permanently + delete. + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DeleteDomain', + verb='POST', + path='/', params=params) + + def delete_expression(self, domain_name, expression_name): + """ + Removes an `Expression` from the search domain. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type expression_name: string + :param expression_name: The name of the `Expression` to delete. + + """ + params = { + 'DomainName': domain_name, + 'ExpressionName': expression_name, + } + return self._make_request( + action='DeleteExpression', + verb='POST', + path='/', params=params) + + def delete_index_field(self, domain_name, index_field_name): + """ + Removes an `IndexField` from the search domain. 
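A comparable sketch for `define_suggester`; the `DocumentSuggesterOptions` keys are assumed from the CloudSearch Suggester structure rather than defined in this patch:

    suggester = {
        'SuggesterName': 'title_suggester',
        'DocumentSuggesterOptions': {
            'SourceField': 'title',
            'FuzzyMatching': 'low',
        },
    }
    conn.define_suggester('demo-domain', suggester)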
For more + information, see `Configuring Index Fields`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type index_field_name: string + :param index_field_name: The name of the index field your want to + remove from the domain's indexing options. + + """ + params = { + 'DomainName': domain_name, + 'IndexFieldName': index_field_name, + } + return self._make_request( + action='DeleteIndexField', + verb='POST', + path='/', params=params) + + def delete_suggester(self, domain_name, suggester_name): + """ + Deletes a suggester. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type suggester_name: string + :param suggester_name: Specifies the name of the suggester you want to + delete. + + """ + params = { + 'DomainName': domain_name, + 'SuggesterName': suggester_name, + } + return self._make_request( + action='DeleteSuggester', + verb='POST', + path='/', params=params) + + def describe_analysis_schemes(self, domain_name, + analysis_scheme_names=None, deployed=None): + """ + Gets the analysis schemes configured for a domain. An analysis + scheme defines language-specific text processing options for a + `text` field. Can be limited to specific analysis schemes by + name. By default, shows all analysis schemes and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Configuring + Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type analysis_scheme_names: list + :param analysis_scheme_names: The analysis schemes you want to + describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if analysis_scheme_names is not None: + self.build_list_params(params, + analysis_scheme_names, + 'AnalysisSchemeNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAnalysisSchemes', + verb='POST', + path='/', params=params) + + def describe_availability_options(self, domain_name, deployed=None): + """ + Gets the availability options configured for a domain. By + default, shows the configuration with any pending changes. Set + the `Deployed` option to `True` to show the active + configuration and exclude pending changes. For more + information, see `Configuring Availability Options`_ in the + Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. 
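For the `Deployed` flag shared by the describe calls, a small sketch; the response nesting is assumed to follow the same Response/Result pattern used throughout this module:

    # Show only the active configuration, excluding pending changes.
    data = conn.describe_analysis_schemes('demo-domain', deployed=True)
    schemes = (data['DescribeAnalysisSchemesResponse']
                   ['DescribeAnalysisSchemesResult']
                   ['AnalysisSchemes'])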
+ + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeAvailabilityOptions', + verb='POST', + path='/', params=params) + + def describe_domains(self, domain_names=None): + """ + Gets information about the search domains owned by this + account. Can be limited to specific domains. Shows all domains + by default. To get the number of searchable documents in a + domain, use the console or submit a `matchall` request to your + domain's search endpoint: + `q=matchall&q.parser=structured&size=0`. For more information, + see `Getting Information about a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_names: list + :param domain_names: The names of the domains you want to include in + the response. + + """ + params = {} + if domain_names is not None: + self.build_list_params(params, + domain_names, + 'DomainNames.member') + return self._make_request( + action='DescribeDomains', + verb='POST', + path='/', params=params) + + def describe_expressions(self, domain_name, expression_names=None, + deployed=None): + """ + Gets the expressions configured for the search domain. Can be + limited to specific expressions by name. By default, shows all + expressions and includes any pending changes to the + configuration. Set the `Deployed` option to `True` to show the + active configuration and exclude pending changes. For more + information, see `Configuring Expressions`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type expression_names: list + :param expression_names: Limits the `DescribeExpressions` response to + the specified expressions. If not specified, all expressions are + shown. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if expression_names is not None: + self.build_list_params(params, + expression_names, + 'ExpressionNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeExpressions', + verb='POST', + path='/', params=params) + + def describe_index_fields(self, domain_name, field_names=None, + deployed=None): + """ + Gets information about the index fields configured for the + search domain. Can be limited to specific fields by name. By + default, shows all fields and includes any pending changes to + the configuration. Set the `Deployed` option to `True` to show + the active configuration and exclude pending changes. For more + information, see `Getting Domain Information`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type field_names: list + :param field_names: A list of the index fields you want to describe. If + not specified, information is returned for all configured index + fields. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. 
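Tying `define_expression` and `describe_expressions` together, a hedged sketch; the `ExpressionName`/`ExpressionValue` keys are assumed from the CloudSearch Expression structure, and `rating` is a placeholder int field:

    expression = {
        'ExpressionName': 'popularity',
        'ExpressionValue': '(0.3 * _score) + (0.7 * rating)',
    }
    conn.define_expression('demo-domain', expression)
    pending = conn.describe_expressions('demo-domain',
                                        expression_names=['popularity'])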
+ + """ + params = {'DomainName': domain_name, } + if field_names is not None: + self.build_list_params(params, + field_names, + 'FieldNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeIndexFields', + verb='POST', + path='/', params=params) + + def describe_scaling_parameters(self, domain_name): + """ + Gets the scaling parameters configured for a domain. A + domain's scaling parameters specify the desired search + instance type and replication count. For more information, see + `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='DescribeScalingParameters', + verb='POST', + path='/', params=params) + + def describe_service_access_policies(self, domain_name, deployed=None): + """ + Gets information about the access policies that control access + to the domain's document and search endpoints. By default, + shows the configuration with any pending changes. Set the + `Deployed` option to `True` to show the active configuration + and exclude pending changes. For more information, see + `Configuring Access for a Search Domain`_ in the Amazon + CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def describe_suggesters(self, domain_name, suggester_names=None, + deployed=None): + """ + Gets the suggesters configured for a domain. A suggester + enables you to display possible matches before users finish + typing their queries. Can be limited to specific suggesters by + name. By default, shows all suggesters and includes any + pending changes to the configuration. Set the `Deployed` + option to `True` to show the active configuration and exclude + pending changes. For more information, see `Getting Search + Suggestions`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: The name of the domain you want to describe. + + :type suggester_names: list + :param suggester_names: The suggesters you want to describe. + + :type deployed: boolean + :param deployed: Whether to display the deployed configuration ( + `True`) or include any pending changes ( `False`). Defaults to + `False`. + + """ + params = {'DomainName': domain_name, } + if suggester_names is not None: + self.build_list_params(params, + suggester_names, + 'SuggesterNames.member') + if deployed is not None: + params['Deployed'] = str( + deployed).lower() + return self._make_request( + action='DescribeSuggesters', + verb='POST', + path='/', params=params) + + def index_documents(self, domain_name): + """ + Tells the search domain to start indexing its documents using + the latest indexing options. 
This operation must be invoked to + activate options whose OptionStatus is + `RequiresIndexDocuments`. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + """ + params = {'DomainName': domain_name, } + return self._make_request( + action='IndexDocuments', + verb='POST', + path='/', params=params) + + def list_domain_names(self): + """ + Lists all search domains owned by an account. + """ + params = {} + return self._make_request( + action='ListDomainNames', + verb='POST', + path='/', params=params) + + def update_availability_options(self, domain_name, multi_az): + """ + Configures the availability options for a domain. Enabling the + Multi-AZ option expands an Amazon CloudSearch domain to an + additional Availability Zone in the same Region to increase + fault tolerance in the event of a service disruption. Changes + to the Multi-AZ option can take about half an hour to become + active. For more information, see `Configuring Availability + Options`_ in the Amazon CloudSearch Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type multi_az: boolean + :param multi_az: You expand an existing search domain to a second + Availability Zone by setting the Multi-AZ option to true. + Similarly, you can turn off the Multi-AZ option to downgrade the + domain to a single Availability Zone by setting the Multi-AZ option + to `False`. + + """ + params = {'DomainName': domain_name, 'MultiAZ': multi_az, } + return self._make_request( + action='UpdateAvailabilityOptions', + verb='POST', + path='/', params=params) + + def update_scaling_parameters(self, domain_name, scaling_parameters): + """ + Configures scaling parameters for a domain. A domain's scaling + parameters specify the desired search instance type and + replication count. Amazon CloudSearch will still automatically + scale your domain based on the volume of data and traffic, but + not below the desired instance type and replication count. If + the Multi-AZ option is enabled, these values control the + resources used per Availability Zone. For more information, + see `Configuring Scaling Options`_ in the Amazon CloudSearch + Developer Guide . + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type scaling_parameters: dict + :param scaling_parameters: The desired instance type and desired number + of replicas of each index partition. 
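A sketch of the `scaling_parameters` dict; the field names are assumed from the CloudSearch ScalingParameters structure, not defined in this patch:

    scaling = {
        'DesiredInstanceType': 'search.m1.small',
        'DesiredReplicationCount': 1,
    }
    conn.update_scaling_parameters('demo-domain', scaling)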
+ + """ + params = {'DomainName': domain_name, } + self.build_complex_param(params, 'ScalingParameters', + scaling_parameters) + return self._make_request( + action='UpdateScalingParameters', + verb='POST', + path='/', params=params) + + def update_service_access_policies(self, domain_name, access_policies): + """ + Configures the access rules that control access to the + domain's document and search endpoints. For more information, + see ` Configuring Access for an Amazon CloudSearch Domain`_. + + :type domain_name: string + :param domain_name: A string that represents the name of a domain. + Domain names are unique across the domains owned by an account + within an AWS region. Domain names start with a letter or number + and can contain the following characters: a-z (lowercase), 0-9, and + - (hyphen). + + :type access_policies: string + :param access_policies: The access rules you want to configure. These + rules replace any existing rules. + + """ + params = { + 'DomainName': domain_name, + 'AccessPolicies': access_policies, + } + return self._make_request( + action='UpdateServiceAccessPolicies', + verb='POST', + path='/', params=params) + + def build_complex_param(self, params, label, value): + """Serialize a structure. + + For example:: + + param_type = 'structure' + label = 'IndexField' + value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}} + + would result in the params dict being updated with these params:: + + IndexField.IndexFieldName = a + IndexField.IntOptions.DefaultValue = 5 + + :type params: dict + :param params: The params dict. The complex list params + will be added to this dict. + + :type label: str + :param label: String label for param key + + :type value: any + :param value: The value to serialize + """ + for k, v in value.items(): + if isinstance(v, dict): + for k2, v2 in v.items(): + self.build_complex_param(params, label + '.' + k, v) + elif isinstance(v, bool): + params['%s.%s' % (label, k)] = v and 'true' or 'false' + else: + params['%s.%s' % (label, k)] = v + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/cloudsearch2/layer2.py b/ext/boto/cloudsearch2/layer2.py new file mode 100644 index 0000000000..28fdc74c63 --- /dev/null +++ b/ext/boto/cloudsearch2/layer2.py @@ -0,0 +1,94 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.cloudsearch2.layer1 import CloudSearchConnection +from boto.cloudsearch2.domain import Domain +from boto.compat import six + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host=None, debug=0, session_token=None, region=None, + validate_certs=True, sign_request=False): + + if isinstance(region, six.string_types): + import boto.cloudsearch2 + for region_info in boto.cloudsearch2.regions(): + if region_info.name == region: + region = region_info + break + + self.layer1 = CloudSearchConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + is_secure=is_secure, + port=port, + proxy=proxy, + proxy_port=proxy_port, + host=host, + debug=debug, + security_token=session_token, + region=region, + validate_certs=validate_certs, + sign_request=sign_request) + + def list_domains(self, domain_names=None): + """ + Return a list of objects for each domain defined in the + current account. + :rtype: list of :class:`boto.cloudsearch2.domain.Domain` + """ + domain_data = self.layer1.describe_domains(domain_names) + + domain_data = (domain_data['DescribeDomainsResponse'] + ['DescribeDomainsResult'] + ['DomainStatusList']) + + return [Domain(self.layer1, data) for data in domain_data] + + def create_domain(self, domain_name): + """ + Create a new CloudSearch domain and return the corresponding object. + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + data = self.layer1.create_domain(domain_name) + return Domain(self.layer1, data['CreateDomainResponse'] + ['CreateDomainResult'] + ['DomainStatus']) + + def lookup(self, domain_name): + """ + Lookup a single domain + :param domain_name: The name of the domain to look up + :type domain_name: str + + :return: Domain object, or None if the domain isn't found + :rtype: :class:`boto.cloudsearch2.domain.Domain` + """ + domains = self.list_domains(domain_names=[domain_name]) + if len(domains) > 0: + return domains[0] diff --git a/ext/boto/cloudsearch2/optionstatus.py b/ext/boto/cloudsearch2/optionstatus.py new file mode 100644 index 0000000000..0a45bea4f0 --- /dev/null +++ b/ext/boto/cloudsearch2/optionstatus.py @@ -0,0 +1,233 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
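Stepping back to the `Layer2` wrapper above, a minimal usage sketch with a placeholder region and domain name:

    from boto.cloudsearch2.layer2 import Layer2

    cs = Layer2(region='us-east-1')
    domain = cs.lookup('demo-domain')  # Domain object, or None if absent
    if domain is None:
        domain = cs.create_domain('demo-domain')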
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.compat import json + + +class OptionStatus(dict): + """ + Presents a combination of status field (defined below) which are + accessed as attributes and option values which are stored in the + native Python dictionary. In this class, the option values are + merged from a JSON object that is stored as the Option part of + the object. + + :ivar domain_name: The name of the domain this option is associated with. + :ivar create_date: A timestamp for when this option was created. + :ivar state: The state of processing a change to an option. + Possible values: + + * RequiresIndexDocuments: the option's latest value will not + be visible in searches until IndexDocuments has been called + and indexing is complete. + * Processing: the option's latest value is not yet visible in + all searches but is in the process of being activated. + * Active: the option's latest value is completely visible. + + :ivar update_date: A timestamp for when this option was updated. + :ivar update_version: A unique integer that indicates when this + option was last updated. + """ + + def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None, + save_fn=None): + self.domain = domain + self.refresh_fn = refresh_fn + self.refresh_key = refresh_key + self.save_fn = save_fn + self.refresh(data) + + def _update_status(self, status): + self.creation_date = status['CreationDate'] + self.status = status['State'] + self.update_date = status['UpdateDate'] + self.update_version = int(status['UpdateVersion']) + + def _update_options(self, options): + if options: + self.update(options) + + def refresh(self, data=None): + """ + Refresh the local state of the object. You can either pass + new state data in as the parameter ``data`` or, if that parameter + is omitted, the state data will be retrieved from CloudSearch. + """ + if not data: + if self.refresh_fn: + data = self.refresh_fn(self.domain.name) + + if data and self.refresh_key: + # Attempt to pull out the right nested bag of data + for key in self.refresh_key: + data = data[key] + if data: + self._update_status(data['Status']) + self._update_options(data['Options']) + + def to_json(self): + """ + Return the JSON representation of the options as a string. + """ + return json.dumps(self) + + def save(self): + """ + Write the current state of the local object back to the + CloudSearch service. + """ + if self.save_fn: + data = self.save_fn(self.domain.name, self.to_json()) + self.refresh(data) + + +class IndexFieldStatus(OptionStatus): + def save(self): + pass + + +class AvailabilityOptionsStatus(OptionStatus): + def save(self): + pass + + +class ScalingParametersStatus(IndexFieldStatus): + pass + + +class ExpressionStatus(IndexFieldStatus): + pass + + +class ServicePoliciesStatus(OptionStatus): + + def new_statement(self, arn, ip): + """ + Returns a new policy statement that will allow + access to the service described by ``arn`` by the + ip specified in ``ip``. + + :type arn: string + :param arn: The Amazon Resource Notation identifier for the + service you wish to provide access to. 
This would be + either the search service or the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + return { + "Effect": "Allow", + "Action": "*", # Docs say use GET, but denies unless * + "Resource": arn, + "Condition": { + "IpAddress": { + "aws:SourceIp": [ip] + } + } + } + + def _allow_ip(self, arn, ip): + if 'Statement' not in self: + s = self.new_statement(arn, ip) + self['Statement'] = [s] + self.save() + else: + add_statement = True + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + add_statement = False + condition = statement['Condition'][condition_name] + if ip not in condition['aws:SourceIp']: + condition['aws:SourceIp'].append(ip) + + if add_statement: + s = self.new_statement(arn, ip) + self['Statement'].append(s) + self.save() + + def allow_search_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def allow_doc_ip(self, ip): + """ + Add the provided ip address or CIDR block to the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._allow_ip(arn, ip) + + def _disallow_ip(self, arn, ip): + if 'Statement' not in self: + return + need_update = False + for statement in self['Statement']: + if statement['Resource'] == arn: + for condition_name in statement['Condition']: + if condition_name == 'IpAddress': + condition = statement['Condition'][condition_name] + if ip in condition['aws:SourceIp']: + condition['aws:SourceIp'].remove(ip) + need_update = True + if need_update: + self.save() + + def disallow_search_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the search service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) + + def disallow_doc_ip(self, ip): + """ + Remove the provided ip address or CIDR block from the list of + allowable address for the document service. + + :type ip: string + :param ip: An IP address or CIDR block you wish to grant access + to. + """ + arn = self.domain.service_arn + self._disallow_ip(arn, ip) diff --git a/ext/boto/cloudsearch2/search.py b/ext/boto/cloudsearch2/search.py new file mode 100644 index 0000000000..3db3a472e1 --- /dev/null +++ b/ext/boto/cloudsearch2/search.py @@ -0,0 +1,452 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
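Because `new_statement()` has no side effects, the policy JSON it builds can be inspected in isolation; constructing the status object without a live domain is purely illustrative, and the ARN is a placeholder:

    from boto.cloudsearch2.optionstatus import ServicePoliciesStatus

    status = ServicePoliciesStatus(domain=None)
    stmt = status.new_statement(
        'arn:aws:cloudsearch:us-east-1:123456789012:domain/demo-domain',
        '192.0.2.0/24')
    # -> {'Effect': 'Allow', 'Action': '*', 'Resource': <arn>,
    #     'Condition': {'IpAddress': {'aws:SourceIp': ['192.0.2.0/24']}}}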
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from math import ceil +from boto.compat import json, map, six +import requests +from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + +SIMPLE = 'simple' +STRUCTURED = 'structured' +LUCENE = 'lucene' +DISMAX = 'dismax' + + +class SearchServiceException(Exception): + pass + + +class SearchResults(object): + def __init__(self, **attrs): + self.rid = attrs['status']['rid'] + self.time_ms = attrs['status']['time-ms'] + self.hits = attrs['hits']['found'] + self.docs = attrs['hits']['hit'] + self.start = attrs['hits']['start'] + self.query = attrs['query'] + self.search_service = attrs['search_service'] + + self.facets = {} + if 'facets' in attrs: + for (facet, values) in attrs['facets'].items(): + if 'buckets' in values: + self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values.get('buckets', []))) + + self.num_pages_needed = ceil(self.hits / self.query.real_size) + + def __len__(self): + return len(self.docs) + + def __iter__(self): + return iter(self.docs) + + def next_page(self): + """Call Cloudsearch to get the next page of search results + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: the following page of search results + """ + if self.query.page <= self.num_pages_needed: + self.query.start += self.query.real_size + self.query.page += 1 + return self.search_service(self.query) + else: + raise StopIteration + + +class Query(object): + + RESULTS_PER_PAGE = 500 + + def __init__(self, q=None, parser=None, fq=None, expr=None, + return_fields=None, size=10, start=0, sort=None, + facet=None, highlight=None, partial=None, options=None): + + self.q = q + self.parser = parser + self.fq = fq + self.expr = expr or {} + self.sort = sort or [] + self.return_fields = return_fields or [] + self.start = start + self.facet = facet or {} + self.highlight = highlight or {} + self.partial = partial + self.options = options + self.page = 0 + self.update_size(size) + + def update_size(self, new_size): + self.size = new_size + self.real_size = Query.RESULTS_PER_PAGE if (self.size > + Query.RESULTS_PER_PAGE or self.size == 0) else self.size + + def to_params(self): + """Transform search parameters from instance properties to a dictionary + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.parser: + params['q.parser'] = self.parser + + if self.fq: + params['fq'] = self.fq + + if self.expr: + for k, v in six.iteritems(self.expr): + params['expr.%s' % k] = v + + if self.facet: + for k, v in six.iteritems(self.facet): + if not isinstance(v, six.string_types): + v = json.dumps(v) + params['facet.%s' % k] = v + + if self.highlight: + for k, v in six.iteritems(self.highlight): + params['highlight.%s' % k] = v + + if self.options: + params['q.options'] = self.options + + if self.return_fields: + params['return'] = ','.join(self.return_fields) + + if self.partial is not None: + params['partial'] = self.partial + + if self.sort: + params['sort'] = ','.join(self.sort) + + return 
params + + def to_domain_connection_params(self): + """ + Transform search parameters from instance properties to a dictionary + that CloudSearchDomainConnection can accept + + :rtype: dict + :return: search parameters + """ + params = {'start': self.start, 'size': self.real_size} + + if self.q: + params['q'] = self.q + + if self.parser: + params['query_parser'] = self.parser + + if self.fq: + params['filter_query'] = self.fq + + if self.expr: + expr = {} + for k, v in six.iteritems(self.expr): + expr['expr.%s' % k] = v + + params['expr'] = expr + + if self.facet: + facet = {} + for k, v in six.iteritems(self.facet): + if not isinstance(v, six.string_types): + v = json.dumps(v) + facet['facet.%s' % k] = v + + params['facet'] = facet + + if self.highlight: + highlight = {} + for k, v in six.iteritems(self.highlight): + highlight['highlight.%s' % k] = v + + params['highlight'] = highlight + + if self.options: + params['query_options'] = self.options + + if self.return_fields: + params['ret'] = ','.join(self.return_fields) + + if self.partial is not None: + params['partial'] = self.partial + + if self.sort: + params['sort'] = ','.join(self.sort) + + return params + + +class SearchConnection(object): + + def __init__(self, domain=None, endpoint=None): + self.domain = domain + self.endpoint = endpoint + self.session = requests.Session() + + # Endpoint needs to be set before initializing CloudSearchDomainConnection + if not endpoint: + self.endpoint = domain.search_service_endpoint + + # Copy proxy settings from connection and check if request should be signed + self.sign_request = False + if self.domain and self.domain.layer1: + if self.domain.layer1.use_proxy: + self.session.proxies['http'] = self.domain.layer1.get_proxy_url_with_auth() + + self.sign_request = getattr(self.domain.layer1, 'sign_request', False) + + if self.sign_request: + layer1 = self.domain.layer1 + self.domain_connection = CloudSearchDomainConnection( + host=self.endpoint, + aws_access_key_id=layer1.aws_access_key_id, + aws_secret_access_key=layer1.aws_secret_access_key, + region=layer1.region, + provider=layer1.provider + ) + + def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, + partial=None, options=None): + return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields, + size=size, start=start, facet=facet, highlight=highlight, + sort=sort, partial=partial, options=options) + + def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None, + size=10, start=0, facet=None, highlight=None, sort=None, partial=None, + options=None): + """ + Send a query to CloudSearch + + Each search query should use at least the q or bq argument to specify + the search parameter. The other options are used to specify the + criteria of the search. + + :type q: string + :param q: A string to search the default search fields for. + + :type parser: string + :param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax' + + :type fq: string + :param fq: The filter query to use. + + :type sort: List of strings + :param sort: A list of fields or rank expressions used to order the + search results. Order is handled by adding 'desc' or 'asc' after the field name. + ``['year desc', 'author asc']`` + + :type return_fields: List of strings + :param return_fields: A list of fields which should be returned by the + search. If this field is not specified, only IDs will be returned. 
+        ``['headline']``
+
+        :type size: int
+        :param size: Number of search results to return
+
+        :type start: int
+        :param start: Offset of the first search result to return (can be used
+            for paging)
+
+        :type facet: dict
+        :param facet: Dictionary of fields for which facets should be returned.
+            The facet value is a string of JSON options
+            ``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}``
+
+        :type highlight: dict
+        :param highlight: Dictionary of fields for which highlights should be
+            returned. The highlight value is a string of JSON options
+            ``{'genres': '{format:'text',max_phrases:2,pre_tag:'<b>',post_tag:'</b>'}'}``
+
+        :type partial: bool
+        :param partial: Should partial results from a partitioned service be
+            returned if one or more index partitions are unreachable.
+
+        :type options: str
+        :param options: Options for the query parser specified in *parser*.
+            Specified as a string in JSON format.
+            ``{fields: ['title^5', 'description']}``
+
+        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
+        :return: Returns the results of this search
+
+        The following examples all assume we have indexed a set of documents
+        with fields: *author*, *date*, *headline*
+
+        A simple search will look for documents whose default text search
+        fields will contain the search word exactly:
+
+        >>> search(q='Tim')  # Return documents with the word Tim in them (but not Timothy)
+
+        A simple search with more keywords will return documents whose default
+        text search fields contain the search strings together or separately.
+
+        >>> search(q='Tim apple')  # Will match "tim" and "apple"
+
+        More complex searches require the boolean search operator.
+
+        Wildcard searches can be used to search for any words that start with
+        the search string.
+
+        >>> search(q="'Tim*'")  # Return documents with words like Tim or Timothy
+
+        Search terms can also be combined. Allowed operators are "and", "or",
+        "not", "field", "optional", "token", "phrase", or "filter"
+
+        >>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured')
+
+        Facets allow you to show classification information about the search
+        results. For example, you can retrieve the authors who have written
+        about Tim, with a maximum of 3:
For example, you can retrieve the authors who have written + about Tim with a max of 3 + + >>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'}) + """ + + query = self.build_query(q=q, parser=parser, fq=fq, rank=rank, + return_fields=return_fields, + size=size, start=start, facet=facet, + highlight=highlight, sort=sort, + partial=partial, options=options) + return self(query) + + def _search_with_auth(self, params): + return self.domain_connection.search(params.pop("q", ""), **params) + + def _search_without_auth(self, params, api_version): + url = "http://%s/%s/search" % (self.endpoint, api_version) + resp = self.session.get(url, params=params) + + return {'body': resp.content.decode('utf-8'), 'status_code': resp.status_code} + + def __call__(self, query): + """Make a call to CloudSearch + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: :class:`boto.cloudsearch2.search.SearchResults` + :return: search results + """ + api_version = '2013-01-01' + if self.domain and self.domain.layer1: + api_version = self.domain.layer1.APIVersion + + if self.sign_request: + data = self._search_with_auth(query.to_domain_connection_params()) + else: + r = self._search_without_auth(query.to_params(), api_version) + + _body = r['body'] + _status_code = r['status_code'] + + try: + data = json.loads(_body) + except ValueError: + if _status_code == 403: + msg = '' + import re + g = re.search('

    403 Forbidden

    ([^<]+)<', _body) + try: + msg = ': %s' % (g.groups()[0].strip()) + except AttributeError: + pass + raise SearchServiceException('Authentication error from Amazon%s' % msg) + raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query) + + if 'messages' in data and 'error' in data: + for m in data['messages']: + if m['severity'] == 'fatal': + raise SearchServiceException("Error processing search %s " + "=> %s" % (params, m['message']), query) + elif 'error' in data: + raise SearchServiceException("Unknown error processing search %s" + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self + + return SearchResults(**data) + + def get_all_paged(self, query, per_page): + """Get a generator to iterate over all pages of search results + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :type per_page: int + :param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object. + + :rtype: generator + :return: Generator containing :class:`boto.cloudsearch2.search.SearchResults` + """ + query.update_size(per_page) + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + yield results + query.start += query.real_size + page += 1 + + def get_all_hits(self, query): + """Get a generator to iterate over all search results + + Transparently handles the results paging from Cloudsearch + search results so even if you have many thousands of results + you can iterate over all results in a reasonably efficient + manner. + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: A group of search criteria + + :rtype: generator + :return: All docs matching query + """ + page = 0 + num_pages_needed = 0 + while page <= num_pages_needed: + results = self(query) + num_pages_needed = results.num_pages_needed + for doc in results: + yield doc + query.start += query.real_size + page += 1 + + def get_num_hits(self, query): + """Return the total number of hits for query + + :type query: :class:`boto.cloudsearch2.search.Query` + :param query: a group of search criteria + + :rtype: int + :return: Total number of hits for query + """ + query.update_size(1) + return self(query).hits diff --git a/ext/boto/cloudsearchdomain/__init__.py b/ext/boto/cloudsearchdomain/__init__.py new file mode 100644 index 0000000000..2d72449d37 --- /dev/null +++ b/ext/boto/cloudsearchdomain/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon CloudSearch Domain service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return get_regions('cloudsearchdomain', + connection_cls=CloudSearchDomainConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection + return connect('cloudsearchdomain', region_name, + connection_cls=CloudSearchDomainConnection, **kw_params) diff --git a/ext/boto/cloudsearchdomain/exceptions.py b/ext/boto/cloudsearchdomain/exceptions.py new file mode 100644 index 0000000000..0f9961532d --- /dev/null +++ b/ext/boto/cloudsearchdomain/exceptions.py @@ -0,0 +1,30 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class SearchException(BotoServerError): + pass + + +class DocumentServiceException(BotoServerError): + pass diff --git a/ext/boto/cloudsearchdomain/layer1.py b/ext/boto/cloudsearchdomain/layer1.py new file mode 100644 index 0000000000..7a68bbed22 --- /dev/null +++ b/ext/boto/cloudsearchdomain/layer1.py @@ -0,0 +1,540 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.cloudsearchdomain import exceptions + + +class CloudSearchDomainConnection(AWSAuthConnection): + """ + You use the AmazonCloudSearch2013 API to upload documents to a + search domain and search those documents. + + The endpoints for submitting `UploadDocuments`, `Search`, and + `Suggest` requests are domain-specific. To get the endpoints for + your domain, use the Amazon CloudSearch configuration service + `DescribeDomains` action. The domain endpoints are also displayed + on the domain dashboard in the Amazon CloudSearch console. You + submit suggest requests to the search endpoint. + + For more information, see the `Amazon CloudSearch Developer + Guide`_. + """ + APIVersion = "2013-01-01" + AuthServiceName = 'cloudsearch' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "SearchException": exceptions.SearchException, + "DocumentServiceException": exceptions.DocumentServiceException, + } + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + if kwargs.get('host', None) is None: + raise ValueError( + 'The argument, host, must be provided when creating a ' + 'CloudSearchDomainConnection because its methods require the ' + 'specific domain\'s endpoint in order to successfully make ' + 'requests to that CloudSearch Domain.' + ) + super(CloudSearchDomainConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def search(self, query, cursor=None, expr=None, facet=None, + filter_query=None, highlight=None, partial=None, + query_options=None, query_parser=None, ret=None, size=None, + sort=None, start=None): + """ + Retrieves a list of documents that match the specified search + criteria. How you specify the search criteria depends on which + query parser you use. Amazon CloudSearch supports four query + parsers: + + + + `simple`: search all `text` and `text-array` fields for the + specified string. Search for phrases, individual terms, and + prefixes. + + `structured`: search specific fields, construct compound + queries using Boolean operators, and use advanced features + such as term boosting and proximity searching. + + `lucene`: specify search criteria using the Apache Lucene + query parser syntax. + + `dismax`: specify search criteria using the simplified + subset of the Apache Lucene query parser syntax defined by the + DisMax query parser. + + + For more information, see `Searching Your Data`_ in the Amazon + CloudSearch Developer Guide . + + The endpoint for submitting `Search` requests is domain- + specific. You submit search requests to a domain's search + endpoint. To get the search endpoint for your domain, use the + Amazon CloudSearch configuration service `DescribeDomains` + action. A domain's endpoints are also displayed on the domain + dashboard in the Amazon CloudSearch console. 
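Since the endpoints are domain-specific, this connection cannot infer its host; a minimal sketch, with a placeholder endpoint of the usual form:

    from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection

    domain_conn = CloudSearchDomainConnection(
        host='search-demo-xxxxxxxxxxxx.us-east-1.cloudsearch.amazonaws.com',
        aws_access_key_id='<key-id>',
        aws_secret_access_key='<secret>')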
+ + :type cursor: string + :param cursor: Retrieves a cursor value you can use to page through + large result sets. Use the `size` parameter to control the number + of hits to include in each response. You can specify either the + `cursor` or `start` parameter in a request; they are mutually + exclusive. To get the first cursor, set the cursor value to + `initial`. In subsequent requests, specify the cursor value + returned in the hits section of the response. + For more information, see `Paginating Results`_ in the Amazon + CloudSearch Developer Guide . + + :type expr: string + :param expr: Defines one or more numeric expressions that can be used + to sort results or specify search or filter criteria. You can also + specify expressions as return fields. + For more information about defining and using expressions, see + `Configuring Expressions`_ in the Amazon CloudSearch Developer + Guide . + + :type facet: string + :param facet: Specifies one or more fields for which to get facet + information, and options that control how the facet information is + returned. Each specified field must be facet-enabled in the domain + configuration. The fields and options are specified in JSON using + the form `{"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTI + ON":VALUE,"OPTION":"STRING"}}`. + You can specify the following faceting options: + + + + `buckets` specifies an array of the facet values or ranges to count. + Ranges are specified using the same syntax that you use to search + for a range of values. For more information, see ` Searching for a + Range of Values`_ in the Amazon CloudSearch Developer Guide . + Buckets are returned in the order they are specified in the + request. The `sort` and `size` options are not valid if you specify + `buckets`. + + `size` specifies the maximum number of facets to include in the + results. By default, Amazon CloudSearch returns counts for the top + 10. The `size` parameter is only valid when you specify the `sort` + option; it cannot be used in conjunction with `buckets`. + + `sort` specifies how you want to sort the facets in the results: + `bucket` or `count`. Specify `bucket` to sort alphabetically or + numerically by facet value (in ascending order). Specify `count` to + sort by the facet counts computed for each facet value (in + descending order). To retrieve facet counts for particular values + or ranges of values, use the `buckets` option instead of `sort`. + + + If no facet options are specified, facet counts are computed for all + field values, the facets are sorted by facet count, and the top 10 + facets are returned in the results. + + For more information, see `Getting and Using Facet Information`_ in the + Amazon CloudSearch Developer Guide . + + :type filter_query: string + :param filter_query: Specifies a structured query that filters the + results of a search without affecting how the results are scored + and sorted. You use `filterQuery` in conjunction with the `query` + parameter to filter the documents that match the constraints + specified in the `query` parameter. Specifying a filter controls + only which matching documents are included in the results, it has + no effect on how they are scored and sorted. The `filterQuery` + parameter supports the full structured query syntax. + For more information about using filters, see `Filtering Matching + Documents`_ in the Amazon CloudSearch Developer Guide . + + :type highlight: string + :param highlight: Retrieves highlights for matches in the specified + `text` or `text-array` fields. 
Each specified field must be + highlight enabled in the domain configuration. The fields and + options are specified in JSON using the form `{"FIELD":{"OPTION":VA + LUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`. + You can specify the following highlight options: + + + + `format`: specifies the format of the data in the text field: `text` + or `html`. When data is returned as HTML, all non-alphanumeric + characters are encoded. The default is `html`. + + `max_phrases`: specifies the maximum number of occurrences of the + search term(s) you want to highlight. By default, the first + occurrence is highlighted. + + `pre_tag`: specifies the string to prepend to an occurrence of a + search term. The default for HTML highlights is ``. The + default for text highlights is `*`. + + `post_tag`: specifies the string to append to an occurrence of a + search term. The default for HTML highlights is ``. The + default for text highlights is `*`. + + + If no highlight options are specified for a field, the returned field + text is treated as HTML and the first match is highlighted with + emphasis tags: `search-term`. + + :type partial: boolean + :param partial: Enables partial results to be returned if one or more + index partitions are unavailable. When your search index is + partitioned across multiple search instances, by default Amazon + CloudSearch only returns results if every partition can be queried. + This means that the failure of a single search instance can result + in 5xx (internal server) errors. When you enable partial results, + Amazon CloudSearch returns whatever results are available and + includes the percentage of documents searched in the search results + (percent-searched). This enables you to more gracefully degrade + your users' search experience. For example, rather than displaying + no results, you could display the partial results and a message + indicating that the results might be incomplete due to a temporary + system outage. + + :type query: string + :param query: Specifies the search criteria for the request. How you + specify the search criteria depends on the query parser used for + the request and the parser options specified in the `queryOptions` + parameter. By default, the `simple` query parser is used to process + requests. To use the `structured`, `lucene`, or `dismax` query + parser, you must also specify the `queryParser` parameter. + For more information about specifying search criteria, see `Searching + Your Data`_ in the Amazon CloudSearch Developer Guide . + + :type query_options: string + :param query_options: + Configures options for the query parser specified in the `queryParser` + parameter. + + The options you can configure vary according to which parser you use: + + + + `defaultOperator`: The default operator used to combine individual + terms in the search string. For example: `defaultOperator: 'or'`. + For the `dismax` parser, you specify a percentage that represents + the percentage of terms in the search string (rounded down) that + must match, rather than a default operator. A value of `0%` is the + equivalent to OR, and a value of `100%` is equivalent to AND. The + percentage must be specified as a value in the range 0-100 followed + by the percent (%) symbol. For example, `defaultOperator: 50%`. + Valid values: `and`, `or`, a percentage in the range 0%-100% ( + `dismax`). Default: `and` ( `simple`, `structured`, `lucene`) or + `100` ( `dismax`). Valid for: `simple`, `structured`, `lucene`, and + `dismax`. 
+ + `fields`: An array of the fields to search when no fields are + specified in a search. If no fields are specified in a search and + this option is not specified, all text and text-array fields are + searched. You can specify a weight for each field to control the + relative importance of each field when Amazon CloudSearch + calculates relevance scores. To specify a field weight, append a + caret ( `^`) symbol and the weight to the field name. For example, + to boost the importance of the `title` field over the `description` + field you could specify: `"fields":["title^5","description"]`. + Valid values: The name of any configured field and an optional + numeric value greater than zero. Default: All `text` and `text- + array` fields. Valid for: `simple`, `structured`, `lucene`, and + `dismax`. + + `operators`: An array of the operators or special characters you want + to disable for the simple query parser. If you disable the `and`, + `or`, or `not` operators, the corresponding operators ( `+`, `|`, + `-`) have no special meaning and are dropped from the search + string. Similarly, disabling `prefix` disables the wildcard + operator ( `*`) and disabling `phrase` disables the ability to + search for phrases by enclosing phrases in double quotes. Disabling + precedence disables the ability to control order of precedence + using parentheses. Disabling `near` disables the ability to use the + ~ operator to perform a sloppy phrase search. Disabling the `fuzzy` + operator disables the ability to use the ~ operator to perform a + fuzzy search. `escape` disables the ability to use a backslash ( + `\`) to escape special characters within the search string. + Disabling whitespace is an advanced option that prevents the parser + from tokenizing on whitespace, which can be useful for Vietnamese. + (It prevents Vietnamese words from being split incorrectly.) For + example, you could disable all operators other than the phrase + operator to support just simple term and phrase queries: + `"operators":["and","not","or", "prefix"]`. Valid values: `and`, + `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`, + `prefix`, `whitespace`. Default: All operators and special + characters are enabled. Valid for: `simple`. + + `phraseFields`: An array of the `text` or `text-array` fields you + want to use for phrase searches. When the terms in the search + string appear in close proximity within a field, the field scores + higher. You can specify a weight for each field to boost that + score. The `phraseSlop` option controls how much the matches can + deviate from the search string and still be boosted. To specify a + field weight, append a caret ( `^`) symbol and the weight to the + field name. For example, to boost phrase matches in the `title` + field over the `abstract` field, you could specify: + `"phraseFields":["title^3", "plot"]` Valid values: The name of any + `text` or `text-array` field and an optional numeric value greater + than zero. Default: No fields. If you don't specify any fields with + `phraseFields`, proximity scoring is disabled even if `phraseSlop` + is specified. Valid for: `dismax`. + + `phraseSlop`: An integer value that specifies how much matches can + deviate from the search phrase and still be boosted according to + the weights specified in the `phraseFields` option; for example, + `phraseSlop: 2`. You must also specify `phraseFields` to enable + proximity scoring. Valid values: positive integers. Default: 0. + Valid for: `dismax`. 
+ + `explicitPhraseSlop`: An integer value that specifies how much a + match can deviate from the search phrase when the phrase is + enclosed in double quotes in the search string. (Phrases that + exceed this proximity distance are not considered a match.) For + example, to specify a slop of three for dismax phrase queries, you + would specify `"explicitPhraseSlop":3`. Valid values: positive + integers. Default: 0. Valid for: `dismax`. + + `tieBreaker`: When a term in the search string is found in a + document's field, a score is calculated for that field based on how + common the word is in that field compared to other documents. If + the term occurs in multiple fields within a document, by default + only the highest scoring field contributes to the document's + overall score. You can specify a `tieBreaker` value to enable the + matches in lower-scoring fields to contribute to the document's + score. That way, if two documents have the same max field score for + a particular term, the score for the document that has matches in + more fields will be higher. The formula for calculating the score + with a tieBreaker is `(max field score) + (tieBreaker) * (sum of + the scores for the rest of the matching fields)`. Set `tieBreaker` + to 0 to disregard all but the highest scoring field (pure max): + `"tieBreaker":0`. Set to 1 to sum the scores from all fields (pure + sum): `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0. + Valid for: `dismax`. + + :type query_parser: string + :param query_parser: + Specifies which query parser to use to process the request. If + `queryParser` is not specified, Amazon CloudSearch uses the + `simple` query parser. + + Amazon CloudSearch supports four query parsers: + + + + `simple`: perform simple searches of `text` and `text-array` fields. + By default, the `simple` query parser searches all `text` and + `text-array` fields. You can specify which fields to search by with + the `queryOptions` parameter. If you prefix a search term with a + plus sign (+) documents must contain the term to be considered a + match. (This is the default, unless you configure the default + operator with the `queryOptions` parameter.) You can use the `-` + (NOT), `|` (OR), and `*` (wildcard) operators to exclude particular + terms, find results that match any of the specified terms, or + search for a prefix. To search for a phrase rather than individual + terms, enclose the phrase in double quotes. For more information, + see `Searching for Text`_ in the Amazon CloudSearch Developer Guide + . + + `structured`: perform advanced searches by combining multiple + expressions to define the search criteria. You can also search + within particular fields, search for values and ranges of values, + and use advanced options such as term boosting, `matchall`, and + `near`. For more information, see `Constructing Compound Queries`_ + in the Amazon CloudSearch Developer Guide . + + `lucene`: search using the Apache Lucene query parser syntax. For + more information, see `Apache Lucene Query Parser Syntax`_. + + `dismax`: search using the simplified subset of the Apache Lucene + query parser syntax defined by the DisMax query parser. For more + information, see `DisMax Query Parser Syntax`_. + + :type ret: string + :param ret: Specifies the field and expression values to include in + the response. Multiple fields or expressions are specified as a + comma-separated list. By default, a search response includes all + return enabled fields ( `_all_fields`). 
To return only the document + IDs for the matching documents, specify `_no_fields`. To retrieve + the relevance score calculated for each document, specify `_score`. + + :type size: long + :param size: Specifies the maximum number of search hits to include in + the response. + + :type sort: string + :param sort: Specifies the fields or custom expressions to use to sort + the search results. Multiple fields or expressions are specified as + a comma-separated list. You must specify the sort direction ( `asc` + or `desc`) for each field; for example, `year desc,title asc`. To + use a field to sort results, the field must be sort-enabled in the + domain configuration. Array type fields cannot be used for sorting. + If no `sort` parameter is specified, results are sorted by their + default relevance scores in descending order: `_score desc`. You + can also sort by document ID ( `_id asc`) and version ( `_version + desc`). + For more information, see `Sorting Results`_ in the Amazon CloudSearch + Developer Guide . + + :type start: long + :param start: Specifies the offset of the first search hit you want to + return. Note that the result set is zero-based; the first result is + at index 0. You can specify either the `start` or `cursor` + parameter in a request; they are mutually exclusive. + For more information, see `Paginating Results`_ in the Amazon + CloudSearch Developer Guide . + + """ + uri = '/2013-01-01/search' + params = {} + headers = {} + query_params = {} + if cursor is not None: + query_params['cursor'] = cursor + if expr is not None: + query_params['expr'] = expr + if facet is not None: + query_params['facet'] = facet + if filter_query is not None: + query_params['fq'] = filter_query + if highlight is not None: + query_params['highlight'] = highlight + if partial is not None: + query_params['partial'] = partial + if query is not None: + query_params['q'] = query + if query_options is not None: + query_params['q.options'] = query_options + if query_parser is not None: + query_params['q.parser'] = query_parser + if ret is not None: + query_params['return'] = ret + if size is not None: + query_params['size'] = size + if sort is not None: + query_params['sort'] = sort + if start is not None: + query_params['start'] = start + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def suggest(self, query, suggester, size=None): + """ + Retrieves autocomplete suggestions for a partial query string. + You can use suggestions to display likely matches + before users finish typing. In Amazon CloudSearch, suggestions + are based on the contents of a particular text field. When you + request suggestions, Amazon CloudSearch finds all of the + documents whose values in the suggester field start with the + specified query string. The beginning of the field must match + the query string to be considered a match. + + For more information about configuring suggesters and + retrieving suggestions, see `Getting Suggestions`_ in the + Amazon CloudSearch Developer Guide . + + The endpoint for submitting `Suggest` requests is domain- + specific. You submit suggest requests to a domain's search + endpoint. To get the search endpoint for your domain, use the + Amazon CloudSearch configuration service `DescribeDomains` + action. A domain's endpoints are also displayed on the domain + dashboard in the Amazon CloudSearch console.
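# ---------------------------------------------------------------------
# Editorial note: an illustrative sketch of calling the search() method
# implemented above; it is not part of the vendored boto source. The
# endpoint host, credentials, and query values are placeholders you
# would replace with your own domain's details.
from boto.cloudsearchdomain.layer1 import CloudSearchDomainConnection

conn = CloudSearchDomainConnection(
    # A domain-specific search endpoint is required (see __init__ above).
    host='search-mydomain-abc123.us-east-1.cloudsearch.amazonaws.com',
    aws_access_key_id='AKIA...', aws_secret_access_key='...')

# 'return' is a reserved word in Python, so the API's `return`
# parameter is exposed by this method as `ret`.
results = conn.search(query='star wars', query_parser='simple',
                      ret='_all_fields', size=10, sort='_score desc')
for hit in results['hits']['hit']:
    print(hit['id'])
# ---------------------------------------------------------------------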
+ + :type query: string + :param query: Specifies the string for which you want to get + suggestions. + + :type suggester: string + :param suggester: Specifies the name of the suggester to use to find + suggested matches. + + :type size: long + :param size: Specifies the maximum number of suggestions to return. + + """ + uri = '/2013-01-01/suggest' + params = {} + headers = {} + query_params = {} + if query is not None: + query_params['q'] = query + if suggester is not None: + query_params['suggester'] = suggester + if size is not None: + query_params['size'] = size + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def upload_documents(self, documents, content_type): + """ + Posts a batch of documents to a search domain for indexing. A + document batch is a collection of add and delete operations + that represent the documents you want to add, update, or + delete from your domain. Batches can be described in either + JSON or XML. Each item that you want Amazon CloudSearch to + return as a search result (such as a product) is represented + as a document. Every document has a unique ID and one or more + fields that contain the data that you want to search and + return in results. Individual documents cannot contain more + than 1 MB of data. The entire batch cannot exceed 5 MB. To get + the best possible upload performance, group add and delete + operations in batches that are close to the 5 MB limit. + Submitting a large volume of single-document batches can + overload a domain's document service. + + The endpoint for submitting `UploadDocuments` requests is + domain-specific. To get the document endpoint for your domain, + use the Amazon CloudSearch configuration service + `DescribeDomains` action. A domain's endpoints are also + displayed on the domain dashboard in the Amazon CloudSearch + console. + + For more information about formatting your data for Amazon + CloudSearch, see `Preparing Your Data`_ in the Amazon + CloudSearch Developer Guide . For more information about + uploading data for indexing, see `Uploading Data`_ in the + Amazon CloudSearch Developer Guide . + + :type documents: blob + :param documents: A batch of documents formatted in JSON or XML. + + :type content_type: string + :param content_type: + The format of the batch you are uploading. Amazon CloudSearch supports + two document batch formats: + + + + application/json + + application/xml + + """ + uri = '/2013-01-01/documents/batch' + headers = {} + query_params = {} + if content_type is not None: + headers['Content-Type'] = content_type + return self.make_request('POST', uri, expected_status=200, + data=documents, headers=headers, + params=query_params) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = AWSAuthConnection.make_request( + self, verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + raise JSONResponseError(response.status, response.reason, body) diff --git a/ext/boto/cloudtrail/__init__.py b/ext/boto/cloudtrail/__init__.py new file mode 100644 index 0000000000..490d93cfb9 --- /dev/null +++ b/ext/boto/cloudtrail/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Cloudtrail service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + return get_regions('cloudtrail', connection_cls=CloudTrailConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cloudtrail.layer1 import CloudTrailConnection + return connect('cloudtrail', region_name, + connection_cls=CloudTrailConnection, **kw_params) diff --git a/ext/boto/cloudtrail/exceptions.py b/ext/boto/cloudtrail/exceptions.py new file mode 100644 index 0000000000..d2c1b735b6 --- /dev/null +++ b/ext/boto/cloudtrail/exceptions.py @@ -0,0 +1,118 @@ +""" +Exceptions that are specific to the cloudtrail module. +""" +from boto.exception import BotoServerError + + +class InvalidSnsTopicNameException(BotoServerError): + """ + Raised when an invalid SNS topic name is passed to Cloudtrail. + """ + pass + + +class InvalidS3BucketNameException(BotoServerError): + """ + Raised when an invalid S3 bucket name is passed to Cloudtrail. + """ + pass + + +class TrailAlreadyExistsException(BotoServerError): + """ + Raised when the given trail name already exists. + """ + pass + + +class InsufficientSnsTopicPolicyException(BotoServerError): + """ + Raised when the SNS topic does not allow Cloudtrail to post + messages. + """ + pass + + +class InvalidTrailNameException(BotoServerError): + """ + Raised when the trail name is invalid. + """ + pass + + +class InternalErrorException(BotoServerError): + """ + Raised when there was an internal Cloudtrail error. + """ + pass + + +class TrailNotFoundException(BotoServerError): + """ + Raised when the given trail name is not found. + """ + pass + + +class S3BucketDoesNotExistException(BotoServerError): + """ + Raised when the given S3 bucket does not exist. + """ + pass + + +class TrailNotProvidedException(BotoServerError): + """ + Raised when no trail name was provided. + """ + pass + + +class InvalidS3PrefixException(BotoServerError): + """ + Raised when an invalid key prefix is given. + """ + pass + + +class MaximumNumberOfTrailsExceededException(BotoServerError): + """ + Raised when no more trails can be created. 
+ """ + pass + + +class InsufficientS3BucketPolicyException(BotoServerError): + """ + Raised when the S3 bucket does not allow Cloudtrail to + write files into the prefix. + """ + pass + + +class InvalidMaxResultsException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class InvalidLookupAttributesException(BotoServerError): + pass + + +class InvalidCloudWatchLogsLogGroupArnException(BotoServerError): + pass + + +class InvalidCloudWatchLogsRoleArnException(BotoServerError): + pass + + +class CloudWatchLogsDeliveryUnavailableException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass diff --git a/ext/boto/cloudtrail/layer1.py b/ext/boto/cloudtrail/layer1.py new file mode 100644 index 0000000000..f233f321b5 --- /dev/null +++ b/ext/boto/cloudtrail/layer1.py @@ -0,0 +1,374 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudtrail import exceptions +from boto.compat import json + + +class CloudTrailConnection(AWSQueryConnection): + """ + AWS CloudTrail + This is the CloudTrail API Reference. It provides descriptions of + actions, data types, common parameters, and common errors for + CloudTrail. + + CloudTrail is a web service that records AWS API calls for your + AWS account and delivers log files to an Amazon S3 bucket. The + recorded information includes the identity of the user, the start + time of the AWS API call, the source IP address, the request + parameters, and the response elements returned by the service. + + As an alternative to using the API, you can use one of the AWS + SDKs, which consist of libraries and sample code for various + programming languages and platforms (Java, Ruby, .NET, iOS, + Android, etc.). The SDKs provide a convenient way to create + programmatic access to AWSCloudTrail. For example, the SDKs take + care of cryptographically signing requests, managing errors, and + retrying requests automatically. For information about the AWS + SDKs, including how to download and install them, see the `Tools + for Amazon Web Services page`_. + + See the CloudTrail User Guide for information about the data that + is included with each AWS API call listed in the log files. 
+ """ + APIVersion = "2013-11-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com" + ServiceName = "CloudTrail" + TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101" + ResponseError = JSONResponseError + + _faults = { + "InvalidMaxResultsException": exceptions.InvalidMaxResultsException, + "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException, + "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException, + "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "InvalidLookupAttributesException": exceptions.InvalidLookupAttributesException, + "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException, + "InvalidCloudWatchLogsLogGroupArnException": exceptions.InvalidCloudWatchLogsLogGroupArnException, + "InvalidCloudWatchLogsRoleArnException": exceptions.InvalidCloudWatchLogsRoleArnException, + "InvalidTrailNameException": exceptions.InvalidTrailNameException, + "CloudWatchLogsDeliveryUnavailableException": exceptions.CloudWatchLogsDeliveryUnavailableException, + "TrailNotFoundException": exceptions.TrailNotFoundException, + "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidS3PrefixException": exceptions.InvalidS3PrefixException, + "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException, + "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudTrailConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_trail(self, name, s3_bucket_name, s3_key_prefix=None, + sns_topic_name=None, include_global_service_events=None, + cloud_watch_logs_log_group_arn=None, + cloud_watch_logs_role_arn=None): + """ + From the command line, use `create-subscription`. + + Creates a trail that specifies the settings for delivery of + log data to an Amazon S3 bucket. + + :type name: string + :param name: Specifies the name of the trail. + + :type s3_bucket_name: string + :param s3_bucket_name: Specifies the name of the Amazon S3 bucket + designated for publishing log files. + + :type s3_key_prefix: string + :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes + the name of the bucket you have designated for log file delivery. + + :type sns_topic_name: string + :param sns_topic_name: Specifies the name of the Amazon SNS topic + defined for notification of log file delivery. + + :type include_global_service_events: boolean + :param include_global_service_events: Specifies whether the trail is + publishing events from global services such as IAM to the log + files. + + :type cloud_watch_logs_log_group_arn: string + :param cloud_watch_logs_log_group_arn: Specifies a log group name using + an Amazon Resource Name (ARN), a unique identifier that represents + the log group to which CloudTrail logs will be delivered. Not + required unless you specify CloudWatchLogsRoleArn. 
+ + :type cloud_watch_logs_role_arn: string + :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch + Logs endpoint to assume to write to a user's log group. + + """ + params = {'Name': name, 'S3BucketName': s3_bucket_name, } + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + if sns_topic_name is not None: + params['SnsTopicName'] = sns_topic_name + if include_global_service_events is not None: + params['IncludeGlobalServiceEvents'] = include_global_service_events + if cloud_watch_logs_log_group_arn is not None: + params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn + if cloud_watch_logs_role_arn is not None: + params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn + return self.make_request(action='CreateTrail', + body=json.dumps(params)) + + def delete_trail(self, name): + """ + Deletes a trail. + + :type name: string + :param name: The name of a trail to be deleted. + + """ + params = {'Name': name, } + return self.make_request(action='DeleteTrail', + body=json.dumps(params)) + + def describe_trails(self, trail_name_list=None): + """ + Retrieves settings for the trail associated with the current + region for your account. + + :type trail_name_list: list + :param trail_name_list: A list of the names of the trails to describe. + + """ + params = {} + if trail_name_list is not None: + params['trailNameList'] = trail_name_list + return self.make_request(action='DescribeTrails', + body=json.dumps(params)) + + def get_trail_status(self, name): + """ + Returns a JSON-formatted list of information about the + specified trail. Fields include information on delivery + errors, Amazon SNS and Amazon S3 errors, and start and stop + logging times for each trail. + + :type name: string + :param name: The name of the trail for which you are requesting the + current status. + + """ + params = {'Name': name, } + return self.make_request(action='GetTrailStatus', + body=json.dumps(params)) + + def lookup_events(self, lookup_attributes=None, start_time=None, + end_time=None, max_results=None, next_token=None): + """ + Looks up API activity events captured by CloudTrail that + create, update, or delete resources in your account. Events + for a region can be looked up for the times in which you had + CloudTrail turned on in that region during the last seven + days. Lookup supports five different attributes: time range + (defined by a start time and end time), user name, event name, + resource type, and resource name. All attributes are optional. + The maximum number of attributes that can be specified in any + one lookup request is the time range and one other attribute. The + default number of results returned is 10, with a maximum of 50 + possible. The response includes a token that you can use to + get the next page of results. + The rate of lookup requests is limited to one per second per + account. If this limit is exceeded, a throttling error occurs. + Events that occurred during the selected time range will not + be available for lookup if CloudTrail logging was not enabled + when the events occurred. + + :type lookup_attributes: list + :param lookup_attributes: Contains a list of lookup attributes. + Currently the list can contain only one item. + + :type start_time: timestamp + :param start_time: Specifies that only events that occur after or at + the specified time are returned. If the specified start time is + after the specified end time, an error is returned.
+ + :type end_time: timestamp + :param end_time: Specifies that only events that occur before or at the + specified time are returned. If the specified end time is before + the specified start time, an error is returned. + + :type max_results: integer + :param max_results: The number of events to return. Possible values are + 1 through 50. The default is 10. + + :type next_token: string + :param next_token: The token to use to get the next page of results + after a previous API call. This token must be passed in with the + same parameters that were specified in the original call. For + example, if the original call specified an AttributeKey of + 'Username' with a value of 'root', the call with NextToken should + include those same parameters. + + """ + params = {} + if lookup_attributes is not None: + params['LookupAttributes'] = lookup_attributes + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if max_results is not None: + params['MaxResults'] = max_results + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='LookupEvents', + body=json.dumps(params)) + + def start_logging(self, name): + """ + Starts the recording of AWS API calls and log file delivery + for a trail. + + :type name: string + :param name: The name of the trail for which CloudTrail logs AWS API + calls. + + """ + params = {'Name': name, } + return self.make_request(action='StartLogging', + body=json.dumps(params)) + + def stop_logging(self, name): + """ + Suspends the recording of AWS API calls and log file delivery + for the specified trail. Under most circumstances, there is no + need to use this action. You can update a trail without + stopping it first. This action is the only way to stop + recording. + + :type name: string + :param name: Communicates to CloudTrail the name of the trail for which + to stop logging AWS API calls. + + """ + params = {'Name': name, } + return self.make_request(action='StopLogging', + body=json.dumps(params)) + + def update_trail(self, name, s3_bucket_name=None, s3_key_prefix=None, + sns_topic_name=None, include_global_service_events=None, + cloud_watch_logs_log_group_arn=None, + cloud_watch_logs_role_arn=None): + """ + From the command line, use `update-subscription`. + + Updates the settings that specify delivery of log files. + Changes to a trail do not require stopping the CloudTrail + service. Use this action to designate an existing bucket for + log delivery. If the existing bucket has previously been a + target for CloudTrail log files, an IAM policy exists for the + bucket. + + :type name: string + :param name: Specifies the name of the trail. + + :type s3_bucket_name: string + :param s3_bucket_name: Specifies the name of the Amazon S3 bucket + designated for publishing log files. + + :type s3_key_prefix: string + :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes + the name of the bucket you have designated for log file delivery. + + :type sns_topic_name: string + :param sns_topic_name: Specifies the name of the Amazon SNS topic + defined for notification of log file delivery. + + :type include_global_service_events: boolean + :param include_global_service_events: Specifies whether the trail is + publishing events from global services such as IAM to the log + files.
+ + :type cloud_watch_logs_log_group_arn: string + :param cloud_watch_logs_log_group_arn: Specifies a log group name using + an Amazon Resource Name (ARN), a unique identifier that represents + the log group to which CloudTrail logs will be delivered. Not + required unless you specify CloudWatchLogsRoleArn. + + :type cloud_watch_logs_role_arn: string + :param cloud_watch_logs_role_arn: Specifies the role for the CloudWatch + Logs endpoint to assume to write to a user's log group. + + """ + params = {'Name': name, } + if s3_bucket_name is not None: + params['S3BucketName'] = s3_bucket_name + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + if sns_topic_name is not None: + params['SnsTopicName'] = sns_topic_name + if include_global_service_events is not None: + params['IncludeGlobalServiceEvents'] = include_global_service_events + if cloud_watch_logs_log_group_arn is not None: + params['CloudWatchLogsLogGroupArn'] = cloud_watch_logs_log_group_arn + if cloud_watch_logs_role_arn is not None: + params['CloudWatchLogsRoleArn'] = cloud_watch_logs_role_arn + return self.make_request(action='UpdateTrail', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/codedeploy/__init__.py b/ext/boto/codedeploy/__init__.py new file mode 100644 index 0000000000..af136f42cd --- /dev/null +++ b/ext/boto/codedeploy/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS CodeDeploy service.
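# ---------------------------------------------------------------------
# Editorial note: an illustrative sketch of the CloudTrail workflow
# implemented above (create a trail, start logging, look up recent
# events); it is not part of the vendored boto source. The region,
# trail name, and bucket name are placeholders, and the bucket must
# already carry a policy that lets CloudTrail write to it.
import boto.cloudtrail

ct = boto.cloudtrail.connect_to_region('us-east-1')
ct.create_trail(name='example-trail', s3_bucket_name='example-log-bucket')
ct.start_logging(name='example-trail')

# lookup_events() returns the parsed JSON response; recent API activity
# appears under the 'Events' key.
response = ct.lookup_events(max_results=10)
for event in response.get('Events', []):
    print(event.get('EventName'))
# ---------------------------------------------------------------------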
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.codedeploy.layer1 import CodeDeployConnection + return get_regions('codedeploy', connection_cls=CodeDeployConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.codedeploy.layer1 import CodeDeployConnection + return connect('codedeploy', region_name, + connection_cls=CodeDeployConnection, **kw_params) diff --git a/ext/boto/codedeploy/exceptions.py b/ext/boto/codedeploy/exceptions.py new file mode 100644 index 0000000000..f23db8f041 --- /dev/null +++ b/ext/boto/codedeploy/exceptions.py @@ -0,0 +1,199 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class InvalidDeploymentIdException(BotoServerError): + pass + + +class InvalidDeploymentGroupNameException(BotoServerError): + pass + + +class DeploymentConfigAlreadyExistsException(BotoServerError): + pass + + +class InvalidRoleException(BotoServerError): + pass + + +class RoleRequiredException(BotoServerError): + pass + + +class DeploymentGroupAlreadyExistsException(BotoServerError): + pass + + +class DeploymentConfigLimitExceededException(BotoServerError): + pass + + +class InvalidNextTokenException(BotoServerError): + pass + + +class InvalidDeploymentConfigNameException(BotoServerError): + pass + + +class InvalidSortByException(BotoServerError): + pass + + +class InstanceDoesNotExistException(BotoServerError): + pass + + +class InvalidMinimumHealthyHostValueException(BotoServerError): + pass + + +class ApplicationLimitExceededException(BotoServerError): + pass + + +class ApplicationNameRequiredException(BotoServerError): + pass + + +class InvalidEC2TagException(BotoServerError): + pass + + +class DeploymentDoesNotExistException(BotoServerError): + pass + + +class DeploymentLimitExceededException(BotoServerError): + pass + + +class InvalidInstanceStatusException(BotoServerError): + pass + + +class RevisionRequiredException(BotoServerError): + pass + + +class InvalidBucketNameFilterException(BotoServerError): + pass + + +class DeploymentGroupLimitExceededException(BotoServerError): + pass + + +class DeploymentGroupDoesNotExistException(BotoServerError): + pass + + +class DeploymentConfigNameRequiredException(BotoServerError): + pass + + +class DeploymentAlreadyCompletedException(BotoServerError): + pass + + +class RevisionDoesNotExistException(BotoServerError): + pass + + +class 
DeploymentGroupNameRequiredException(BotoServerError): + pass + + +class DeploymentIdRequiredException(BotoServerError): + pass + + +class DeploymentConfigDoesNotExistException(BotoServerError): + pass + + +class BucketNameFilterRequiredException(BotoServerError): + pass + + +class InvalidTimeRangeException(BotoServerError): + pass + + +class ApplicationDoesNotExistException(BotoServerError): + pass + + +class InvalidRevisionException(BotoServerError): + pass + + +class InvalidSortOrderException(BotoServerError): + pass + + +class InvalidOperationException(BotoServerError): + pass + + +class InvalidAutoScalingGroupException(BotoServerError): + pass + + +class InvalidApplicationNameException(BotoServerError): + pass + + +class DescriptionTooLongException(BotoServerError): + pass + + +class ApplicationAlreadyExistsException(BotoServerError): + pass + + +class InvalidDeployedStateFilterException(BotoServerError): + pass + + +class DeploymentNotStartedException(BotoServerError): + pass + + +class DeploymentConfigInUseException(BotoServerError): + pass + + +class InstanceIdRequiredException(BotoServerError): + pass + + +class InvalidKeyPrefixFilterException(BotoServerError): + pass + + +class InvalidDeploymentStatusException(BotoServerError): + pass diff --git a/ext/boto/codedeploy/layer1.py b/ext/boto/codedeploy/layer1.py new file mode 100644 index 0000000000..6c61a08342 --- /dev/null +++ b/ext/boto/codedeploy/layer1.py @@ -0,0 +1,899 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.codedeploy import exceptions + + +class CodeDeployConnection(AWSQueryConnection): + """ + AWS CodeDeploy **Overview** + This is the AWS CodeDeploy API Reference. This guide provides + descriptions of the AWS CodeDeploy APIs. For additional + information, see the `AWS CodeDeploy User Guide`_. + **Using the APIs** + You can use the AWS CodeDeploy APIs to work with the following + items: + + + + Applications , which are unique identifiers that AWS CodeDeploy + uses to ensure that the correct combinations of revisions, + deployment configurations, and deployment groups are being + referenced during deployments. 
You can work with applications by + calling CreateApplication, DeleteApplication, GetApplication, + ListApplications, BatchGetApplications, and UpdateApplication to + create, delete, and get information about applications, and to + change information about an application, respectively. + + Deployment configurations , which are sets of deployment rules + and deployment success and failure conditions that AWS CodeDeploy + uses during deployments. You can work with deployment + configurations by calling CreateDeploymentConfig, + DeleteDeploymentConfig, GetDeploymentConfig, and + ListDeploymentConfigs to create, delete, and get information about + deployment configurations, respectively. + + Deployment groups , which represent groups of Amazon EC2 + instances to which application revisions can be deployed. You can + work with deployment groups by calling CreateDeploymentGroup, + DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups, + and UpdateDeploymentGroup to create, delete, and get information + about single and multiple deployment groups, and to change + information about a deployment group, respectively. + + Deployment instances (also known simply as instances ), which + represent Amazon EC2 instances to which application revisions are + deployed. Deployment instances are identified by their Amazon EC2 + tags or Auto Scaling group names. Deployment instances belong to + deployment groups. You can work with deployment instances by + calling GetDeploymentInstance and ListDeploymentInstances to get + information about single and multiple deployment instances, + respectively. + + Deployments , which represent the process of deploying revisions + to deployment groups. You can work with deployments by calling + CreateDeployment, GetDeployment, ListDeployments, + BatchGetDeployments, and StopDeployment to create and get + information about deployments, and to stop a deployment, + respectively. + + Application revisions (also known simply as revisions ), which + are archive files that are stored in Amazon S3 buckets or GitHub + repositories. These revisions contain source content (such as + source code, web pages, executable files, any deployment scripts, + and similar) along with an Application Specification file (AppSpec + file). (The AppSpec file is unique to AWS CodeDeploy; it defines a + series of deployment actions that you want AWS CodeDeploy to + execute.) An application revision is uniquely identified by its + Amazon S3 object key and its ETag, version, or both. Application + revisions are deployed to deployment groups. You can work with + application revisions by calling GetApplicationRevision, + ListApplicationRevisions, and RegisterApplicationRevision to get + information about application revisions and to inform AWS + CodeDeploy about an application revision, respectively. 
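# ---------------------------------------------------------------------
# Editorial note: an illustrative sketch of the application / deployment
# group / deployment workflow this overview describes; it is not part of
# the vendored boto source. The application name, tag filter, service
# role ARN, and S3 revision location are placeholders.
import boto.codedeploy

cd = boto.codedeploy.connect_to_region('us-east-1')
cd.create_application(application_name='ExampleApp')
cd.create_deployment_group(
    application_name='ExampleApp',
    deployment_group_name='ExampleFleet',
    ec_2_tag_filters=[{'Key': 'Name', 'Value': 'web',
                       'Type': 'KEY_AND_VALUE'}],
    service_role_arn='arn:aws:iam::123456789012:role/CodeDeployRole')

# The revision dict identifies an application bundle stored in S3.
deployment = cd.create_deployment(
    application_name='ExampleApp',
    deployment_group_name='ExampleFleet',
    revision={'revisionType': 'S3',
              's3Location': {'bucket': 'example-bucket',
                             'key': 'app.zip',
                             'bundleType': 'zip'}})
print(deployment.get('deploymentId'))
# ---------------------------------------------------------------------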
+ """ + APIVersion = "2014-10-06" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com" + ServiceName = "codedeploy" + TargetPrefix = "CodeDeploy_20141006" + ResponseError = JSONResponseError + + _faults = { + "InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException, + "InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException, + "DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException, + "InvalidRoleException": exceptions.InvalidRoleException, + "RoleRequiredException": exceptions.RoleRequiredException, + "DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException, + "DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException, + "InvalidSortByException": exceptions.InvalidSortByException, + "InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException, + "InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException, + "ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException, + "ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException, + "InvalidEC2TagException": exceptions.InvalidEC2TagException, + "DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException, + "DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException, + "InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException, + "RevisionRequiredException": exceptions.RevisionRequiredException, + "InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException, + "DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException, + "DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException, + "DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException, + "DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException, + "RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException, + "DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException, + "DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException, + "DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException, + "BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException, + "InvalidRevisionException": exceptions.InvalidRevisionException, + "InvalidSortOrderException": exceptions.InvalidSortOrderException, + "InvalidOperationException": exceptions.InvalidOperationException, + "InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException, + "InvalidApplicationNameException": exceptions.InvalidApplicationNameException, + "DescriptionTooLongException": exceptions.DescriptionTooLongException, + "ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException, + "InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException, + "DeploymentNotStartedException": exceptions.DeploymentNotStartedException, + "DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException, 
+ "InstanceIdRequiredException": exceptions.InstanceIdRequiredException, + "InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException, + "InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CodeDeployConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def batch_get_applications(self, application_names=None): + """ + Gets information about one or more applications. + + :type application_names: list + :param application_names: A list of application names, with multiple + application names separated by spaces. + + """ + params = {} + if application_names is not None: + params['applicationNames'] = application_names + return self.make_request(action='BatchGetApplications', + body=json.dumps(params)) + + def batch_get_deployments(self, deployment_ids=None): + """ + Gets information about one or more deployments. + + :type deployment_ids: list + :param deployment_ids: A list of deployment IDs, with multiple + deployment IDs separated by spaces. + + """ + params = {} + if deployment_ids is not None: + params['deploymentIds'] = deployment_ids + return self.make_request(action='BatchGetDeployments', + body=json.dumps(params)) + + def create_application(self, application_name): + """ + Creates a new application. + + :type application_name: string + :param application_name: The name of the application. This name must be + unique within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='CreateApplication', + body=json.dumps(params)) + + def create_deployment(self, application_name, deployment_group_name=None, + revision=None, deployment_config_name=None, + description=None, + ignore_application_stop_failures=None): + """ + Deploys an application revision to the specified deployment + group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The deployment group's name. + + :type revision: dict + :param revision: The type of revision to deploy, along with information + about the revision's location. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + If not specified, the value configured in the deployment group will be + used as the default. If the deployment group does not have a + deployment configuration associated with it, then + CodeDeployDefault.OneAtATime will be used by default. + + :type description: string + :param description: A comment about the deployment. + + :type ignore_application_stop_failures: boolean + :param ignore_application_stop_failures: If set to true, then if the + deployment causes the ApplicationStop deployment lifecycle event to + fail to a specific instance, the deployment will not be considered + to have failed to that instance at that point and will continue on + to the BeforeInstall deployment lifecycle event. 
+        If set to false or not specified, then if the deployment causes the
+        ApplicationStop deployment lifecycle event to fail to a specific
+        instance, the deployment will stop to that instance, and the
+        deployment to that instance will be considered to have failed.
+
+        """
+        params = {'applicationName': application_name, }
+        if deployment_group_name is not None:
+            params['deploymentGroupName'] = deployment_group_name
+        if revision is not None:
+            params['revision'] = revision
+        if deployment_config_name is not None:
+            params['deploymentConfigName'] = deployment_config_name
+        if description is not None:
+            params['description'] = description
+        if ignore_application_stop_failures is not None:
+            params['ignoreApplicationStopFailures'] = ignore_application_stop_failures
+        return self.make_request(action='CreateDeployment',
+                                 body=json.dumps(params))
+
+    def create_deployment_config(self, deployment_config_name,
+                                 minimum_healthy_hosts=None):
+        """
+        Creates a new deployment configuration.
+
+        :type deployment_config_name: string
+        :param deployment_config_name: The name of the deployment configuration
+            to create.
+
+        :type minimum_healthy_hosts: dict
+        :param minimum_healthy_hosts: The minimum number of healthy instances
+            that should be available at any time during the deployment. There
+            are two parameters expected in the input: type and value.
+        The type parameter takes either of the following values:
+
+        + HOST_COUNT: The value parameter represents the minimum number of
+          healthy instances, as an absolute value.
+        + FLEET_PERCENT: The value parameter represents the minimum number of
+          healthy instances, as a percentage of the total number of instances
+          in the deployment. If you specify FLEET_PERCENT, then at the start
+          of the deployment AWS CodeDeploy converts the percentage to the
+          equivalent number of instances and rounds fractional instances up.
+
+
+        The value parameter takes an integer.
+
+        For example, to set a minimum of 95% healthy instances, specify a type
+        of FLEET_PERCENT and a value of 95.
+
+        """
+        params = {'deploymentConfigName': deployment_config_name, }
+        if minimum_healthy_hosts is not None:
+            params['minimumHealthyHosts'] = minimum_healthy_hosts
+        return self.make_request(action='CreateDeploymentConfig',
+                                 body=json.dumps(params))
+
+    def create_deployment_group(self, application_name,
+                                deployment_group_name,
+                                deployment_config_name=None,
+                                ec_2_tag_filters=None,
+                                auto_scaling_groups=None,
+                                service_role_arn=None):
+        """
+        Creates a new deployment group for application revisions to be
+        deployed to.
+
+        :type application_name: string
+        :param application_name: The name of an existing AWS CodeDeploy
+            application within the AWS user account.
+
+        :type deployment_group_name: string
+        :param deployment_group_name: The name of a new deployment group for
+            the specified application.
+
+        :type deployment_config_name: string
+        :param deployment_config_name: If specified, the deployment
+            configuration name must be one of the predefined values, or it can
+            be a custom deployment configuration:
+
+        + CodeDeployDefault.AllAtOnce deploys an application revision to up to
+          all of the Amazon EC2 instances at once. The overall deployment
+          succeeds if the application revision deploys to at least one of the
+          instances. The overall deployment fails after the application
+          revision fails to deploy to all of the instances. For example, for
+          9 instances, deploy to up to all 9 instances at once.
+          The overall deployment succeeds if any of the 9 instances is
+          successfully deployed to, and it fails if all 9 instances fail to
+          be deployed to.
+        + CodeDeployDefault.HalfAtATime deploys to up to half of the instances
+          at a time (with fractions rounded down). The overall deployment
+          succeeds if the application revision deploys to at least half of
+          the instances (with fractions rounded up); otherwise, the
+          deployment fails. For example, for 9 instances, deploy to up to 4
+          instances at a time. The overall deployment succeeds if 5 or more
+          instances are successfully deployed to; otherwise, the deployment
+          fails. Note that the deployment may successfully deploy to some
+          instances, even if the overall deployment fails.
+        + CodeDeployDefault.OneAtATime deploys the application revision to only
+          one of the instances at a time. The overall deployment succeeds if
+          the application revision deploys to all of the instances. The
+          overall deployment fails after the application revision first fails
+          to deploy to any one instance. For example, for 9 instances, deploy
+          to one instance at a time. The overall deployment succeeds if all 9
+          instances are successfully deployed to, and it fails if any one
+          of the 9 instances fails to be deployed to. Note that the deployment
+          may successfully deploy to some instances, even if the overall
+          deployment fails. This is the default deployment configuration if a
+          configuration isn't specified for either the deployment or the
+          deployment group.
+
+
+        To create a custom deployment configuration, call the create deployment
+        configuration operation.
+
+        :type ec_2_tag_filters: list
+        :param ec_2_tag_filters: The Amazon EC2 tags to filter on.
+
+        :type auto_scaling_groups: list
+        :param auto_scaling_groups: A list of associated Auto Scaling groups.
+
+        :type service_role_arn: string
+        :param service_role_arn: A service role ARN that allows AWS CodeDeploy
+            to act on the user's behalf when interacting with AWS services.
+
+        """
+        params = {
+            'applicationName': application_name,
+            'deploymentGroupName': deployment_group_name,
+        }
+        if deployment_config_name is not None:
+            params['deploymentConfigName'] = deployment_config_name
+        if ec_2_tag_filters is not None:
+            params['ec2TagFilters'] = ec_2_tag_filters
+        if auto_scaling_groups is not None:
+            params['autoScalingGroups'] = auto_scaling_groups
+        if service_role_arn is not None:
+            params['serviceRoleArn'] = service_role_arn
+        return self.make_request(action='CreateDeploymentGroup',
+                                 body=json.dumps(params))
+
+    def delete_application(self, application_name):
+        """
+        Deletes an application.
+
+        :type application_name: string
+        :param application_name: The name of an existing AWS CodeDeploy
+            application within the AWS user account.
+
+        """
+        params = {'applicationName': application_name, }
+        return self.make_request(action='DeleteApplication',
+                                 body=json.dumps(params))
+
+    def delete_deployment_config(self, deployment_config_name):
+        """
+        Deletes a deployment configuration.
+
+        A deployment configuration cannot be deleted if it is
+        currently in use. Also, predefined configurations cannot be
+        deleted.
+
+        :type deployment_config_name: string
+        :param deployment_config_name: The name of an existing deployment
+            configuration within the AWS user account.
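+
+        Example (an illustrative sketch; the configuration name below is a
+        placeholder, and AWS credentials are assumed to be available from
+        the environment)::
+
+            from boto.codedeploy.layer1 import CodeDeployConnection
+
+            conn = CodeDeployConnection()
+            # Create a custom configuration requiring 95% healthy hosts...
+            conn.create_deployment_config(
+                'AtLeast95PercentHealthy',
+                minimum_healthy_hosts={'type': 'FLEET_PERCENT', 'value': 95})
+            # ...and delete it again. The service reports
+            # DeploymentConfigInUseException while a deployment group still
+            # references the configuration.
+            conn.delete_deployment_config('AtLeast95PercentHealthy')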
+ + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='DeleteDeploymentConfig', + body=json.dumps(params)) + + def delete_deployment_group(self, application_name, + deployment_group_name): + """ + Deletes a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='DeleteDeploymentGroup', + body=json.dumps(params)) + + def get_application(self, application_name): + """ + Gets information about an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + """ + params = {'applicationName': application_name, } + return self.make_request(action='GetApplication', + body=json.dumps(params)) + + def get_application_revision(self, application_name, revision): + """ + Gets information about an application revision. + + :type application_name: string + :param application_name: The name of the application that corresponds + to the revision. + + :type revision: dict + :param revision: Information about the application revision to get, + including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + return self.make_request(action='GetApplicationRevision', + body=json.dumps(params)) + + def get_deployment(self, deployment_id): + """ + Gets information about a deployment. + + :type deployment_id: string + :param deployment_id: An existing deployment ID within the AWS user + account. + + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='GetDeployment', + body=json.dumps(params)) + + def get_deployment_config(self, deployment_config_name): + """ + Gets information about a deployment configuration. + + :type deployment_config_name: string + :param deployment_config_name: The name of an existing deployment + configuration within the AWS user account. + + """ + params = {'deploymentConfigName': deployment_config_name, } + return self.make_request(action='GetDeploymentConfig', + body=json.dumps(params)) + + def get_deployment_group(self, application_name, deployment_group_name): + """ + Gets information about a deployment group. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + """ + params = { + 'applicationName': application_name, + 'deploymentGroupName': deployment_group_name, + } + return self.make_request(action='GetDeploymentGroup', + body=json.dumps(params)) + + def get_deployment_instance(self, deployment_id, instance_id): + """ + Gets information about an Amazon EC2 instance as part of a + deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type instance_id: string + :param instance_id: The unique ID of an Amazon EC2 instance in the + deployment's deployment group. 
+ + """ + params = { + 'deploymentId': deployment_id, + 'instanceId': instance_id, + } + return self.make_request(action='GetDeploymentInstance', + body=json.dumps(params)) + + def list_application_revisions(self, application_name, sort_by=None, + sort_order=None, s_3_bucket=None, + s_3_key_prefix=None, deployed=None, + next_token=None): + """ + Lists information about revisions for an application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type sort_by: string + :param sort_by: The column name to sort the list results by: + + + registerTime: Sort the list results by when the revisions were + registered with AWS CodeDeploy. + + firstUsedTime: Sort the list results by when the revisions were first + used by in a deployment. + + lastUsedTime: Sort the list results by when the revisions were last + used in a deployment. + + + If not specified or set to null, the results will be returned in an + arbitrary order. + + :type sort_order: string + :param sort_order: The order to sort the list results by: + + + ascending: Sort the list results in ascending order. + + descending: Sort the list results in descending order. + + + If not specified, the results will be sorted in ascending order. + + If set to null, the results will be sorted in an arbitrary order. + + :type s_3_bucket: string + :param s_3_bucket: A specific Amazon S3 bucket name to limit the search + for revisions. + If set to null, then all of the user's buckets will be searched. + + :type s_3_key_prefix: string + :param s_3_key_prefix: A specific key prefix for the set of Amazon S3 + objects to limit the search for revisions. + + :type deployed: string + :param deployed: + Whether to list revisions based on whether the revision is the target + revision of an deployment group: + + + + include: List revisions that are target revisions of a deployment + group. + + exclude: Do not list revisions that are target revisions of a + deployment group. + + ignore: List all revisions, regardless of whether they are target + revisions of a deployment group. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list application revisions call, which can be used to return the + next set of applications in the list. + + """ + params = {'applicationName': application_name, } + if sort_by is not None: + params['sortBy'] = sort_by + if sort_order is not None: + params['sortOrder'] = sort_order + if s_3_bucket is not None: + params['s3Bucket'] = s_3_bucket + if s_3_key_prefix is not None: + params['s3KeyPrefix'] = s_3_key_prefix + if deployed is not None: + params['deployed'] = deployed + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplicationRevisions', + body=json.dumps(params)) + + def list_applications(self, next_token=None): + """ + Lists the applications registered within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list applications call, which can be used to return the next set of + applications in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListApplications', + body=json.dumps(params)) + + def list_deployment_configs(self, next_token=None): + """ + Lists the deployment configurations within the AWS user + account. 
+ + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment configurations call, which can be used to return + the next set of deployment configurations in the list. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentConfigs', + body=json.dumps(params)) + + def list_deployment_groups(self, application_name, next_token=None): + """ + Lists the deployment groups for an application registered + within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment groups call, which can be used to return the next + set of deployment groups in the list. + + """ + params = {'applicationName': application_name, } + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeploymentGroups', + body=json.dumps(params)) + + def list_deployment_instances(self, deployment_id, next_token=None, + instance_status_filter=None): + """ + Lists the Amazon EC2 instances for a deployment within the AWS + user account. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployment instances call, which can be used to return the + next set of deployment instances in the list. + + :type instance_status_filter: list + :param instance_status_filter: + A subset of instances to list, by status: + + + + Pending: Include in the resulting list those instances with pending + deployments. + + InProgress: Include in the resulting list those instances with in- + progress deployments. + + Succeeded: Include in the resulting list those instances with + succeeded deployments. + + Failed: Include in the resulting list those instances with failed + deployments. + + Skipped: Include in the resulting list those instances with skipped + deployments. + + Unknown: Include in the resulting list those instances with + deployments in an unknown state. + + """ + params = {'deploymentId': deployment_id, } + if next_token is not None: + params['nextToken'] = next_token + if instance_status_filter is not None: + params['instanceStatusFilter'] = instance_status_filter + return self.make_request(action='ListDeploymentInstances', + body=json.dumps(params)) + + def list_deployments(self, application_name=None, + deployment_group_name=None, + include_only_statuses=None, create_time_range=None, + next_token=None): + """ + Lists the deployments under a deployment group for an + application registered within the AWS user account. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type deployment_group_name: string + :param deployment_group_name: The name of an existing deployment group + for the specified application. + + :type include_only_statuses: list + :param include_only_statuses: A subset of deployments to list, by + status: + + + Created: Include in the resulting list created deployments. + + Queued: Include in the resulting list queued deployments. + + In Progress: Include in the resulting list in-progress deployments. + + Succeeded: Include in the resulting list succeeded deployments. 
+ + Failed: Include in the resulting list failed deployments. + + Aborted: Include in the resulting list aborted deployments. + + :type create_time_range: dict + :param create_time_range: A deployment creation start- and end-time + range for returning a subset of the list of deployments. + + :type next_token: string + :param next_token: An identifier that was returned from the previous + list deployments call, which can be used to return the next set of + deployments in the list. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if deployment_group_name is not None: + params['deploymentGroupName'] = deployment_group_name + if include_only_statuses is not None: + params['includeOnlyStatuses'] = include_only_statuses + if create_time_range is not None: + params['createTimeRange'] = create_time_range + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='ListDeployments', + body=json.dumps(params)) + + def register_application_revision(self, application_name, revision, + description=None): + """ + Registers with AWS CodeDeploy a revision for the specified + application. + + :type application_name: string + :param application_name: The name of an existing AWS CodeDeploy + application within the AWS user account. + + :type description: string + :param description: A comment about the revision. + + :type revision: dict + :param revision: Information about the application revision to + register, including the revision's type and its location. + + """ + params = { + 'applicationName': application_name, + 'revision': revision, + } + if description is not None: + params['description'] = description + return self.make_request(action='RegisterApplicationRevision', + body=json.dumps(params)) + + def stop_deployment(self, deployment_id): + """ + Attempts to stop an ongoing deployment. + + :type deployment_id: string + :param deployment_id: The unique ID of a deployment. + + """ + params = {'deploymentId': deployment_id, } + return self.make_request(action='StopDeployment', + body=json.dumps(params)) + + def update_application(self, application_name=None, + new_application_name=None): + """ + Changes an existing application's name. + + :type application_name: string + :param application_name: The current name of the application that you + want to change. + + :type new_application_name: string + :param new_application_name: The new name that you want to change the + application to. + + """ + params = {} + if application_name is not None: + params['applicationName'] = application_name + if new_application_name is not None: + params['newApplicationName'] = new_application_name + return self.make_request(action='UpdateApplication', + body=json.dumps(params)) + + def update_deployment_group(self, application_name, + current_deployment_group_name, + new_deployment_group_name=None, + deployment_config_name=None, + ec_2_tag_filters=None, + auto_scaling_groups=None, + service_role_arn=None): + """ + Changes information about an existing deployment group. + + :type application_name: string + :param application_name: The application name corresponding to the + deployment group to update. + + :type current_deployment_group_name: string + :param current_deployment_group_name: The current name of the existing + deployment group. + + :type new_deployment_group_name: string + :param new_deployment_group_name: The new name of the deployment group, + if you want to change it. 
+ + :type deployment_config_name: string + :param deployment_config_name: The replacement deployment configuration + name to use, if you want to change it. + + :type ec_2_tag_filters: list + :param ec_2_tag_filters: The replacement set of Amazon EC2 tags to + filter on, if you want to change them. + + :type auto_scaling_groups: list + :param auto_scaling_groups: The replacement list of Auto Scaling groups + to be included in the deployment group, if you want to change them. + + :type service_role_arn: string + :param service_role_arn: A replacement service role's ARN, if you want + to change it. + + """ + params = { + 'applicationName': application_name, + 'currentDeploymentGroupName': current_deployment_group_name, + } + if new_deployment_group_name is not None: + params['newDeploymentGroupName'] = new_deployment_group_name + if deployment_config_name is not None: + params['deploymentConfigName'] = deployment_config_name + if ec_2_tag_filters is not None: + params['ec2TagFilters'] = ec_2_tag_filters + if auto_scaling_groups is not None: + params['autoScalingGroups'] = auto_scaling_groups + if service_role_arn is not None: + params['serviceRoleArn'] = service_role_arn + return self.make_request(action='UpdateDeploymentGroup', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/cognito/__init__.py b/ext/boto/cognito/__init__.py new file mode 100644 index 0000000000..70cc23febf --- /dev/null +++ b/ext/boto/cognito/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# diff --git a/ext/boto/cognito/identity/__init__.py b/ext/boto/cognito/identity/__init__.py new file mode 100644 index 0000000000..fff29b42d8 --- /dev/null +++ b/ext/boto/cognito/identity/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Cognito Identity service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cognito.identity.layer1 import CognitoIdentityConnection + return get_regions('cognito-identity', + connection_cls=CognitoIdentityConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cognito.identity.layer1 import CognitoIdentityConnection + return connect('cognito-identity', region_name, + connection_cls=CognitoIdentityConnection, **kw_params) diff --git a/ext/boto/cognito/identity/exceptions.py b/ext/boto/cognito/identity/exceptions.py new file mode 100644 index 0000000000..b5c1236d7e --- /dev/null +++ b/ext/boto/cognito/identity/exceptions.py @@ -0,0 +1,44 @@ +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class ResourceConflictException(BotoServerError): + pass + + +class DeveloperUserAlreadyRegisteredException(BotoServerError): + pass + + +class TooManyRequestsException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InternalErrorException(BotoServerError): + pass + + +class NotAuthorizedException(BotoServerError): + pass diff --git a/ext/boto/cognito/identity/layer1.py b/ext/boto/cognito/identity/layer1.py new file mode 100644 index 0000000000..a7363d5b5c --- /dev/null +++ b/ext/boto/cognito/identity/layer1.py @@ -0,0 +1,549 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cognito.identity import exceptions + + +class CognitoIdentityConnection(AWSQueryConnection): + """ + Amazon Cognito + Amazon Cognito is a web service that delivers scoped temporary + credentials to mobile devices and other untrusted environments. + Amazon Cognito uniquely identifies a device and supplies the user + with a consistent identity over the lifetime of an application. + + Using Amazon Cognito, you can enable authentication with one or + more third-party identity providers (Facebook, Google, or Login + with Amazon), and you can also choose to support unauthenticated + access from your app. Cognito delivers a unique identifier for + each user and acts as an OpenID token provider trusted by AWS + Security Token Service (STS) to access temporary, limited- + privilege AWS credentials. + + To provide end-user credentials, first make an unsigned call to + GetId. If the end user is authenticated with one of the supported + identity providers, set the `Logins` map with the identity + provider token. `GetId` returns a unique identifier for the user. + + Next, make an unsigned call to GetOpenIdToken, which returns the + OpenID token necessary to call STS and retrieve AWS credentials. + This call expects the same `Logins` map as the `GetId` call, as + well as the `IdentityID` originally returned by `GetId`. The token + returned by `GetOpenIdToken` can be passed to the STS operation + `AssumeRoleWithWebIdentity`_ to retrieve AWS credentials. 
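+
+    Example (an illustrative sketch of the flow described above; the
+    account ID, pool ID, and Facebook token below are placeholders, and
+    AWS credentials are assumed to be available from the environment)::
+
+        from boto.cognito.identity.layer1 import CognitoIdentityConnection
+
+        conn = CognitoIdentityConnection()
+        # First step: resolve a unique Cognito identity for the end user.
+        identity = conn.get_id(
+            '123456789012',
+            'us-east-1:00000000-0000-0000-0000-000000000000',
+            logins={'graph.facebook.com': 'FB_ACCESS_TOKEN'})
+        # Exchange the identity for an OpenID token, which can then be
+        # passed to the STS AssumeRoleWithWebIdentity operation.
+        token = conn.get_open_id_token(
+            identity['IdentityId'],
+            logins={'graph.facebook.com': 'FB_ACCESS_TOKEN'})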
+ """ + APIVersion = "2014-06-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cognito-identity.us-east-1.amazonaws.com" + ServiceName = "CognitoIdentity" + TargetPrefix = "AWSCognitoIdentityService" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "ResourceConflictException": exceptions.ResourceConflictException, + "DeveloperUserAlreadyRegisteredException": exceptions.DeveloperUserAlreadyRegisteredException, + "TooManyRequestsException": exceptions.TooManyRequestsException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalErrorException": exceptions.InternalErrorException, + "NotAuthorizedException": exceptions.NotAuthorizedException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CognitoIdentityConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_identity_pool(self, identity_pool_name, + allow_unauthenticated_identities, + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): + """ + Creates a new identity pool. The identity pool is a store of + user identity information that is specific to your AWS + account. The limit on identity pools is 60 per account. + + :type identity_pool_name: string + :param identity_pool_name: A string that you provide. + + :type allow_unauthenticated_identities: boolean + :param allow_unauthenticated_identities: TRUE if the identity pool + supports unauthenticated logins. + + :type supported_login_providers: map + :param supported_login_providers: Optional key:value pairs mapping + provider names to provider app IDs. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This name acts as a placeholder that allows + your backend and the Cognito service to communicate about the + developer provider. For the `DeveloperProviderName`, you can use + letters as well as period ( `.`), underscore ( `_`), and dash ( + `-`). + Once you have set a developer provider name, you cannot change it. + Please take care in setting this parameter. + + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + + """ + params = { + 'IdentityPoolName': identity_pool_name, + 'AllowUnauthenticatedIdentities': allow_unauthenticated_identities, + } + if supported_login_providers is not None: + params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns + return self.make_request(action='CreateIdentityPool', + body=json.dumps(params)) + + def delete_identity_pool(self, identity_pool_id): + """ + Deletes a user pool. Once a pool is deleted, users will not be + able to authenticate with the pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. 
+ + """ + params = {'IdentityPoolId': identity_pool_id, } + return self.make_request(action='DeleteIdentityPool', + body=json.dumps(params)) + + def describe_identity_pool(self, identity_pool_id): + """ + Gets details about a particular identity pool, including the + pool name, ID description, creation date, and current number + of users. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + """ + params = {'IdentityPoolId': identity_pool_id, } + return self.make_request(action='DescribeIdentityPool', + body=json.dumps(params)) + + def get_id(self, account_id, identity_pool_id, logins=None): + """ + Generates (or retrieves) a Cognito ID. Supplying multiple + logins will create an implicit linked account. + + :type account_id: string + :param account_id: A standard AWS account ID (9+ digits). + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + The available provider names for `Logins` are as follows: + + + Facebook: `graph.facebook.com` + + Google: `accounts.google.com` + + Amazon: `www.amazon.com` + + """ + params = { + 'AccountId': account_id, + 'IdentityPoolId': identity_pool_id, + } + if logins is not None: + params['Logins'] = logins + return self.make_request(action='GetId', + body=json.dumps(params)) + + def get_open_id_token(self, identity_id, logins=None): + """ + Gets an OpenID token, using a known Cognito ID. This known + Cognito ID is returned by GetId. You can optionally add + additional logins for the identity. Supplying multiple logins + creates an implicit link. + + The OpenId token is valid for 15 minutes. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + + """ + params = {'IdentityId': identity_id, } + if logins is not None: + params['Logins'] = logins + return self.make_request(action='GetOpenIdToken', + body=json.dumps(params)) + + def get_open_id_token_for_developer_identity(self, identity_pool_id, + logins, identity_id=None, + token_duration=None): + """ + Registers (or retrieves) a Cognito `IdentityId` and an OpenID + Connect token for a user authenticated by your backend + authentication process. Supplying multiple logins will create + an implicit linked account. You can only specify one developer + provider as part of the `Logins` map, which is linked to the + identity pool. The developer provider is the "domain" by which + Cognito will refer to your users. + + You can use `GetOpenIdTokenForDeveloperIdentity` to create a + new identity and to link new logins (that is, user credentials + issued by a public provider or developer provider) to an + existing identity. When you want to create a new identity, the + `IdentityId` should be null. When you want to associate a new + login with an existing authenticated/unauthenticated identity, + you can do so by providing the existing `IdentityId`. This API + will create the identity in the specified `IdentityPoolId`. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. 
+ + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. Each name-value pair represents a user + from a public provider or developer provider. If the user is from a + developer provider, the name-value pair will follow the syntax + `"developer_provider_name": "developer_user_identifier"`. The + developer provider is the "domain" by which Cognito will refer to + your users; you provided this domain while creating/updating the + identity pool. The developer user identifier is an identifier from + your backend that uniquely identifies a user. When you create an + identity pool, you can specify the supported logins. + + :type token_duration: long + :param token_duration: The expiration time of the token, in seconds. + You can specify a custom expiration time for the token so that you + can cache it. If you don't provide an expiration time, the token is + valid for 15 minutes. You can exchange the token with Amazon STS + for temporary AWS credentials, which are valid for a maximum of one + hour. The maximum token duration you can set is 24 hours. You + should take care in setting the expiration time for a token, as + there are significant security implications: an attacker could use + a leaked token to access your AWS resources for the token's + duration. + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'Logins': logins, + } + if identity_id is not None: + params['IdentityId'] = identity_id + if token_duration is not None: + params['TokenDuration'] = token_duration + return self.make_request(action='GetOpenIdTokenForDeveloperIdentity', + body=json.dumps(params)) + + def list_identities(self, identity_pool_id, max_results, next_token=None): + """ + Lists the identities in a pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'MaxResults': max_results, + } + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListIdentities', + body=json.dumps(params)) + + def list_identity_pools(self, max_results, next_token=None): + """ + Lists all of the Cognito identity pools registered for your + account. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. + + """ + params = {'MaxResults': max_results, } + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='ListIdentityPools', + body=json.dumps(params)) + + def lookup_developer_identity(self, identity_pool_id, identity_id=None, + developer_user_identifier=None, + max_results=None, next_token=None): + """ + Retrieves the `IdentityID` associated with a + `DeveloperUserIdentifier` or the list of + `DeveloperUserIdentifier`s associated with an `IdentityId` for + an existing identity. Either `IdentityID` or + `DeveloperUserIdentifier` must not be null. If you supply only + one of these values, the other value will be searched in the + database and returned as a part of the response. If you supply + both, `DeveloperUserIdentifier` will be matched against + `IdentityID`. If the values are verified against the database, + the response returns both values and is the same as the + request. 
Otherwise a `ResourceConflictException` is thrown. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. Typically, a developer + identity provider would issue many developer user identifiers, in + keeping with the number of users. + + :type max_results: integer + :param max_results: The maximum number of identities to return. + + :type next_token: string + :param next_token: A pagination token. The first call you make will + have `NextToken` set to null. After that the service will return + `NextToken` values as needed. For example, let's say you make a + request with `MaxResults` set to 10, and there are 20 matches in + the database. The service will return a pagination token as a part + of the response. This token can be used to call the API again and + get results starting from the 11th match. + + """ + params = {'IdentityPoolId': identity_pool_id, } + if identity_id is not None: + params['IdentityId'] = identity_id + if developer_user_identifier is not None: + params['DeveloperUserIdentifier'] = developer_user_identifier + if max_results is not None: + params['MaxResults'] = max_results + if next_token is not None: + params['NextToken'] = next_token + return self.make_request(action='LookupDeveloperIdentity', + body=json.dumps(params)) + + def merge_developer_identities(self, source_user_identifier, + destination_user_identifier, + developer_provider_name, identity_pool_id): + """ + Merges two users having different `IdentityId`s, existing in + the same identity pool, and identified by the same developer + provider. You can use this action to request that discrete + users be merged and identified as a single user in the Cognito + environment. Cognito associates the given source user ( + `SourceUserIdentifier`) with the `IdentityId` of the + `DestinationUserIdentifier`. Only developer-authenticated + users can be merged. If the users to be merged are associated + with the same public provider, but as two different users, an + exception will be thrown. + + :type source_user_identifier: string + :param source_user_identifier: User identifier for the source user. The + value should be a `DeveloperUserIdentifier`. + + :type destination_user_identifier: string + :param destination_user_identifier: User identifier for the destination + user. The value should be a `DeveloperUserIdentifier`. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. This is a (pseudo) domain name that you + provide while creating an identity pool. This name acts as a + placeholder that allows your backend and the Cognito service to + communicate about the developer provider. For the + `DeveloperProviderName`, you can use letters as well as period (.), + underscore (_), and dash (-). + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. 
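+
+        Example (an illustrative sketch; every identifier below is a
+        placeholder, and AWS credentials are assumed to be available from
+        the environment)::
+
+            conn = CognitoIdentityConnection()
+            # Fold the source developer user into the destination user's
+            # Cognito identity.
+            conn.merge_developer_identities(
+                'user-a@example.com',
+                'user-b@example.com',
+                'login.mycompany.myapp',
+                'us-east-1:00000000-0000-0000-0000-000000000000')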
+ + """ + params = { + 'SourceUserIdentifier': source_user_identifier, + 'DestinationUserIdentifier': destination_user_identifier, + 'DeveloperProviderName': developer_provider_name, + 'IdentityPoolId': identity_pool_id, + } + return self.make_request(action='MergeDeveloperIdentities', + body=json.dumps(params)) + + def unlink_developer_identity(self, identity_id, identity_pool_id, + developer_provider_name, + developer_user_identifier): + """ + Unlinks a `DeveloperUserIdentifier` from an existing identity. + Unlinked developer users will be considered new identities + next time they are seen. If, for a given Cognito identity, you + remove all federated identities as well as the developer user + identifier, the Cognito identity becomes inaccessible. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. + + :type developer_user_identifier: string + :param developer_user_identifier: A unique ID used by your backend + authentication process to identify a user. + + """ + params = { + 'IdentityId': identity_id, + 'IdentityPoolId': identity_pool_id, + 'DeveloperProviderName': developer_provider_name, + 'DeveloperUserIdentifier': developer_user_identifier, + } + return self.make_request(action='UnlinkDeveloperIdentity', + body=json.dumps(params)) + + def unlink_identity(self, identity_id, logins, logins_to_remove): + """ + Unlinks a federated identity from an existing account. + Unlinked logins will be considered new identities next time + they are seen. Removing the last linked login will make this + identity inaccessible. + + :type identity_id: string + :param identity_id: A unique identifier in the format REGION:GUID. + + :type logins: map + :param logins: A set of optional name-value pairs that map provider + names to provider tokens. + + :type logins_to_remove: list + :param logins_to_remove: Provider names to unlink from this identity. + + """ + params = { + 'IdentityId': identity_id, + 'Logins': logins, + 'LoginsToRemove': logins_to_remove, + } + return self.make_request(action='UnlinkIdentity', + body=json.dumps(params)) + + def update_identity_pool(self, identity_pool_id, identity_pool_name, + allow_unauthenticated_identities, + supported_login_providers=None, + developer_provider_name=None, + open_id_connect_provider_ar_ns=None): + """ + Updates a user pool. + + :type identity_pool_id: string + :param identity_pool_id: An identity pool ID in the format REGION:GUID. + + :type identity_pool_name: string + :param identity_pool_name: A string that you provide. + + :type allow_unauthenticated_identities: boolean + :param allow_unauthenticated_identities: TRUE if the identity pool + supports unauthenticated logins. + + :type supported_login_providers: map + :param supported_login_providers: Optional key:value pairs mapping + provider names to provider app IDs. + + :type developer_provider_name: string + :param developer_provider_name: The "domain" by which Cognito will + refer to your users. 
+ + :type open_id_connect_provider_ar_ns: list + :param open_id_connect_provider_ar_ns: + + """ + params = { + 'IdentityPoolId': identity_pool_id, + 'IdentityPoolName': identity_pool_name, + 'AllowUnauthenticatedIdentities': allow_unauthenticated_identities, + } + if supported_login_providers is not None: + params['SupportedLoginProviders'] = supported_login_providers + if developer_provider_name is not None: + params['DeveloperProviderName'] = developer_provider_name + if open_id_connect_provider_ar_ns is not None: + params['OpenIdConnectProviderARNs'] = open_id_connect_provider_ar_ns + return self.make_request(action='UpdateIdentityPool', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/cognito/sync/__init__.py b/ext/boto/cognito/sync/__init__.py new file mode 100644 index 0000000000..f8183ad943 --- /dev/null +++ b/ext/boto/cognito/sync/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Cognito Sync service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cognito.sync.layer1 import CognitoSyncConnection + return get_regions('cognito-sync', connection_cls=CognitoSyncConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.cognito.sync.layer1 import CognitoSyncConnection + return connect('cognito-sync', region_name, + connection_cls=CognitoSyncConnection, **kw_params) diff --git a/ext/boto/cognito/sync/exceptions.py b/ext/boto/cognito/sync/exceptions.py new file mode 100644 index 0000000000..3e83c3ca8f --- /dev/null +++ b/ext/boto/cognito/sync/exceptions.py @@ -0,0 +1,54 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class ResourceConflictException(BotoServerError): + pass + + +class InvalidConfigurationException(BotoServerError): + pass + + +class TooManyRequestsException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InternalErrorException(BotoServerError): + pass + + +class NotAuthorizedException(BotoServerError): + pass diff --git a/ext/boto/cognito/sync/layer1.py b/ext/boto/cognito/sync/layer1.py new file mode 100644 index 0000000000..59e9d953cd --- /dev/null +++ b/ext/boto/cognito/sync/layer1.py @@ -0,0 +1,494 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.cognito.sync import exceptions + + +class CognitoSyncConnection(AWSAuthConnection): + """ + Amazon Cognito Sync + Amazon Cognito Sync provides an AWS service and client library + that enable cross-device syncing of application-related user data. + High-level client libraries are available for both iOS and + Android. You can use these libraries to persist data locally so + that it's available even if the device is offline. Developer + credentials don't need to be stored on the mobile device to access + the service. You can use Amazon Cognito to obtain a normalized + user ID and credentials. User data is persisted in a dataset that + can store up to 1 MB of key-value pairs, and you can have up to 20 + datasets per user identity. + + With Amazon Cognito Sync, the data stored for each identity is + accessible only to credentials assigned to that identity. In order + to use the Cognito Sync service, you need to make API calls using + credentials retrieved with `Amazon Cognito Identity service`_. + """ + APIVersion = "2014-06-30" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cognito-sync.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "ResourceConflictException": exceptions.ResourceConflictException, + "InvalidConfigurationException": exceptions.InvalidConfigurationException, + "TooManyRequestsException": exceptions.TooManyRequestsException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalErrorException": exceptions.InternalErrorException, + "NotAuthorizedException": exceptions.NotAuthorizedException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(CognitoSyncConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def delete_dataset(self, identity_pool_id, identity_id, dataset_name): + """ + Deletes the specific dataset. The dataset will be deleted + permanently, and the action can't be undone. Datasets that + this dataset was merged with will no longer report the merge. + Any consequent operation on this dataset will result in a + ResourceNotFoundException. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). 
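+
+        Example (a minimal sketch; the pool ID, identity ID, and dataset
+        name are placeholders, and AWS credentials are assumed to be
+        available from the environment)::
+
+            from boto.cognito.sync.layer1 import CognitoSyncConnection
+
+            conn = CognitoSyncConnection()
+            # Issues DELETE /identitypools/<pool>/identities/<id>/datasets/<name>.
+            conn.delete_dataset(
+                'us-east-1:00000000-0000-0000-0000-000000000000',
+                'us-east-1:11111111-1111-1111-1111-111111111111',
+                'locations')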
+ + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( + identity_pool_id, identity_id, dataset_name) + return self.make_request('DELETE', uri, expected_status=200) + + def describe_dataset(self, identity_pool_id, identity_id, dataset_name): + """ + Gets metadata about a dataset by identity and dataset name. + The credentials used to make this API call need to have access + to the identity data. With Amazon Cognito Sync, each identity + has access only to its own data. You should use Amazon Cognito + Identity service to retrieve the credentials necessary to make + this API call. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format( + identity_pool_id, identity_id, dataset_name) + return self.make_request('GET', uri, expected_status=200) + + def describe_identity_pool_usage(self, identity_pool_id): + """ + Gets usage details (for example, data storage) about a + particular identity pool. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + """ + + uri = '/identitypools/{0}'.format(identity_pool_id) + return self.make_request('GET', uri, expected_status=200) + + def describe_identity_usage(self, identity_pool_id, identity_id): + """ + Gets usage information for an identity, including number of + datasets and data usage. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + """ + + uri = '/identitypools/{0}/identities/{1}'.format( + identity_pool_id, identity_id) + return self.make_request('GET', uri, expected_status=200) + + def get_identity_pool_configuration(self, identity_pool_id): + """ + Gets the configuration settings of an identity pool. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. This is the ID of the pool for which to return a + configuration. + + """ + + uri = '/identitypools/{0}/configuration'.format(identity_pool_id) + return self.make_request('GET', uri, expected_status=200) + + def list_datasets(self, identity_pool_id, identity_id, next_token=None, + max_results=None): + """ + Lists datasets for an identity. The credentials used to make + this API call need to have access to the identity data. With + Amazon Cognito Sync, each identity has access only to its own + data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. 
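(Reviewer note: with Medusa's ext/ directory on sys.path, the metadata calls above can be smoke-tested as in the sketch below. This is illustrative only; the pool and identity IDs are placeholders, credentials obtained through Cognito Identity are assumed, and the response keys follow the service's documented JSON shapes.)

from boto.cognito.sync import connect_to_region

POOL_ID = 'us-east-1:00000000-0000-0000-0000-000000000000'      # placeholder
IDENTITY_ID = 'us-east-1:11111111-1111-1111-1111-111111111111'  # placeholder

conn = connect_to_region('us-east-1')
# make_request() returns the decoded JSON body as a plain dict.
meta = conn.describe_dataset(POOL_ID, IDENTITY_ID, 'profile')
usage = conn.describe_identity_usage(POOL_ID, IDENTITY_ID)
print(meta['Dataset']['NumRecords'], usage['IdentityUsage']['DatasetCount'])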
+ + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + """ + + uri = '/identitypools/{0}/identities/{1}/datasets'.format( + identity_pool_id, identity_id) + params = {} + headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_identity_pool_usage(self, next_token=None, max_results=None): + """ + Gets a list of identity pools registered with Cognito. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + """ + + uri = '/identitypools' + params = {} + headers = {} + query_params = {} + if next_token is not None: + query_params['nextToken'] = next_token + if max_results is not None: + query_params['maxResults'] = max_results + return self.make_request('GET', uri, expected_status=200, + data=json.dumps(params), headers=headers, + params=query_params) + + def list_records(self, identity_pool_id, identity_id, dataset_name, + last_sync_count=None, next_token=None, max_results=None, + sync_session_token=None): + """ + Gets paginated records, optionally changed after a particular + sync count for a dataset and identity. The credentials used to + make this API call need to have access to the identity data. + With Amazon Cognito Sync, each identity has access only to its + own data. You should use Amazon Cognito Identity service to + retrieve the credentials necessary to make this API call. + + :type identity_pool_id: string + :param identity_pool_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type identity_id: string + :param identity_id: A name-spaced GUID (for example, us- + east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon + Cognito. GUID generation is unique within a region. + + :type dataset_name: string + :param dataset_name: A string of up to 128 characters. Allowed + characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.' + (dot). + + :type last_sync_count: long + :param last_sync_count: The last server sync count for this record. + + :type next_token: string + :param next_token: A pagination token for obtaining the next page of + results. + + :type max_results: integer + :param max_results: The maximum number of results to be returned. + + :type sync_session_token: string + :param sync_session_token: A token containing a session ID, identity + ID, and expiration. 
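(To make the pagination contract concrete, a minimal sketch of draining list_records, reusing conn and the placeholder IDs from the sketch above; Records and NextToken are the service's documented response keys.)

records = []
next_token = None
while True:
    page = conn.list_records(POOL_ID, IDENTITY_ID, 'profile',
                             max_results=100, next_token=next_token)
    records.extend(page.get('Records', []))
    next_token = page.get('NextToken')
    if not next_token:  # no further pages
        break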
+
+        """
+
+        uri = '/identitypools/{0}/identities/{1}/datasets/{2}/records'.format(
+            identity_pool_id, identity_id, dataset_name)
+        params = {}
+        headers = {}
+        query_params = {}
+        if last_sync_count is not None:
+            query_params['lastSyncCount'] = last_sync_count
+        if next_token is not None:
+            query_params['nextToken'] = next_token
+        if max_results is not None:
+            query_params['maxResults'] = max_results
+        if sync_session_token is not None:
+            query_params['syncSessionToken'] = sync_session_token
+        return self.make_request('GET', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def register_device(self, identity_pool_id, identity_id, platform, token):
+        """
+        Registers a device to receive push sync notifications.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. Here, the ID of the pool that the identity belongs to.
+
+        :type identity_id: string
+        :param identity_id: The unique ID for this identity.
+
+        :type platform: string
+        :param platform: The SNS platform type (e.g. GCM, SDM, APNS,
+            APNS_SANDBOX).
+
+        :type token: string
+        :param token: The push token.
+
+        """
+
+        uri = '/identitypools/{0}/identity/{1}/device'.format(
+            identity_pool_id, identity_id)
+        params = {'Platform': platform, 'Token': token, }
+        headers = {}
+        query_params = {}
+        return self.make_request('POST', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def set_identity_pool_configuration(self, identity_pool_id,
+                                        push_sync=None):
+        """
+        Sets the necessary configuration for push sync.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. This is the ID of the pool to modify.
+
+        :type push_sync: dict
+        :param push_sync: Configuration options to be applied to the identity
+            pool.
+
+        """
+
+        uri = '/identitypools/{0}/configuration'.format(identity_pool_id)
+        params = {}
+        headers = {}
+        query_params = {}
+        if push_sync is not None:
+            params['PushSync'] = push_sync
+        return self.make_request('POST', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def subscribe_to_dataset(self, identity_pool_id, identity_id,
+                             dataset_name, device_id):
+        """
+        Subscribes to receive notifications when a dataset is modified
+        by another device.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. The ID of the pool to which the identity belongs.
+
+        :type identity_id: string
+        :param identity_id: Unique ID for this identity.
+
+        :type dataset_name: string
+        :param dataset_name: The name of the dataset to subscribe to.
+
+        :type device_id: string
+        :param device_id: The unique ID generated for this device by Cognito.
+
+        """
+
+        uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
+            identity_pool_id, identity_id, dataset_name, device_id)
+        return self.make_request('POST', uri, expected_status=200)
+
+    def unsubscribe_from_dataset(self, identity_pool_id, identity_id,
+                                 dataset_name, device_id):
+        """
+        Unsubscribes from receiving notifications when a dataset is
+        modified by another device.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. The ID of the pool to which this identity belongs.
+
+        :type identity_id: string
+        :param identity_id: Unique ID for this identity.
+
+        :type dataset_name: string
+        :param dataset_name: The name of the dataset from which to unsubscribe.
+
+        :type device_id: string
+        :param device_id: The unique ID generated for this device by Cognito.
+
+        """
+
+        uri = '/identitypools/{0}/identities/{1}/datasets/{2}/subscriptions/{3}'.format(
+            identity_pool_id, identity_id, dataset_name, device_id)
+        return self.make_request('DELETE', uri, expected_status=200)
+
+    def update_records(self, identity_pool_id, identity_id, dataset_name,
+                       sync_session_token, device_id=None,
+                       record_patches=None, client_context=None):
+        """
+        Posts updates to records and adds and deletes records for a
+        dataset and user. The credentials used to make this API call
+        need to have access to the identity data. With Amazon Cognito
+        Sync, each identity has access only to its own data. You
+        should use Amazon Cognito Identity service to retrieve the
+        credentials necessary to make this API call.
+
+        :type identity_pool_id: string
+        :param identity_pool_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. GUID generation is unique within a region.
+
+        :type identity_id: string
+        :param identity_id: A name-spaced GUID (for example, us-
+            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
+            Cognito. GUID generation is unique within a region.
+
+        :type dataset_name: string
+        :param dataset_name: A string of up to 128 characters. Allowed
+            characters are a-z, A-Z, 0-9, '_' (underscore), '-' (dash), and '.'
+            (dot).
+
+        :type device_id: string
+        :param device_id: The unique ID generated for this device by Cognito.
+
+        :type record_patches: list
+        :param record_patches: A list of patch operations.
+
+        :type sync_session_token: string
+        :param sync_session_token: The SyncSessionToken returned by a previous
+            call to ListRecords for this dataset and identity.
+
+        :type client_context: string
+        :param client_context: Intended to supply a device ID that will
+            populate the `lastModifiedBy` field referenced in other methods.
+            The `ClientContext` field is not yet implemented.
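(Sketch of the intended write path, continuing the example above: ListRecords supplies both the session token and the server sync count that UpdateRecords expects. The patch dict mirrors the documented RecordPatch shape; the key and value are invented for illustration.)

page = conn.list_records(POOL_ID, IDENTITY_ID, 'profile')
patch = {
    'Op': 'replace',                        # or 'remove'
    'Key': 'theme',                         # illustrative key
    'Value': 'dark',
    'SyncCount': page['DatasetSyncCount'],  # last known server sync count
}
conn.update_records(POOL_ID, IDENTITY_ID, 'profile',
                    page['SyncSessionToken'], record_patches=[patch])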
+
+        """
+
+        uri = '/identitypools/{0}/identities/{1}/datasets/{2}'.format(
+            identity_pool_id, identity_id, dataset_name)
+        params = {'SyncSessionToken': sync_session_token, }
+        headers = {}
+        query_params = {}
+        if device_id is not None:
+            params['DeviceId'] = device_id
+        if record_patches is not None:
+            params['RecordPatches'] = record_patches
+        if client_context is not None:
+            headers['x-amz-Client-Context'] = client_context
+        return self.make_request('POST', uri, expected_status=200,
+                                 data=json.dumps(params), headers=headers,
+                                 params=query_params)
+
+    def make_request(self, verb, resource, headers=None, data='',
+                     expected_status=None, params=None):
+        if headers is None:
+            headers = {}
+        response = AWSAuthConnection.make_request(
+            self, verb, resource, headers=headers, data=data, params=params)
+        body = json.loads(response.read().decode('utf-8'))
+        if response.status == expected_status:
+            return body
+        else:
+            error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
+            error_class = self._faults.get(error_type, self.ResponseError)
+            raise error_class(response.status, response.reason, body)
diff --git a/ext/boto/compat.py b/ext/boto/compat.py
new file mode 100644
index 0000000000..d257180307
--- /dev/null
+++ b/ext/boto/compat.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+
+# This allows boto modules to say "from boto.compat import json". This is
+# preferred so that all modules don't have to repeat this idiom.
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+
+# Switch to use encodebytes, which deprecates encodestring in Python 3
+try:
+    from base64 import encodebytes
+except ImportError:
+    from base64 import encodestring as encodebytes
+
+
+# If running in Google App Engine there is no "user" and
+# os.path.expanduser() will fail. Attempt to detect this case and use a
+# no-op expanduser function in this case.
+try:
+    os.path.expanduser('~')
+    expanduser = os.path.expanduser
+except (AttributeError, ImportError):
+    # This is probably running on App Engine.
+ expanduser = (lambda x: x) + +from boto.vendored import six + +from boto.vendored.six import BytesIO, StringIO +from boto.vendored.six.moves import filter, http_client, map, _thread, \ + urllib, zip +from boto.vendored.six.moves.queue import Queue +from boto.vendored.six.moves.urllib.parse import parse_qs, quote, unquote, \ + urlparse, urlsplit +from boto.vendored.six.moves.urllib.parse import unquote_plus +from boto.vendored.six.moves.urllib.request import urlopen + +if six.PY3: + # StandardError was removed, so use the base exception type instead + StandardError = Exception + long_type = int + from configparser import ConfigParser, NoOptionError, NoSectionError + unquote_str = unquote_plus + parse_qs_safe = parse_qs +else: + StandardError = StandardError + long_type = long + from ConfigParser import SafeConfigParser as ConfigParser + from ConfigParser import NoOptionError, NoSectionError + + def unquote_str(value, encoding='utf-8'): + # In python2, unquote() gives us a string back that has the urldecoded + # bits, but not the unicode parts. We need to decode this manually. + # unquote has special logic in which if it receives a unicode object it + # will decode it to latin1. This is hard coded. To avoid this, we'll + # encode the string with the passed in encoding before trying to + # unquote it. + byte_string = value.encode(encoding) + return unquote_plus(byte_string).decode(encoding) + + # These are the same default arguments for python3's + # urllib.parse.parse_qs. + def parse_qs_safe(qs, keep_blank_values=False, strict_parsing=False, + encoding='utf-8', errors='replace'): + """Parse a query handling unicode arguments properly in Python 2.""" + is_text_type = isinstance(qs, six.text_type) + if is_text_type: + # URL encoding uses ASCII code points only. + qs = qs.encode('ascii') + qs_dict = parse_qs(qs, keep_blank_values, strict_parsing) + if is_text_type: + # Decode the parsed dictionary back to unicode. + result = {} + for (name, value) in qs_dict.items(): + decoded_name = name.decode(encoding, errors) + decoded_value = [item.decode(encoding, errors) + for item in value] + result[decoded_name] = decoded_value + return result + return qs_dict diff --git a/ext/boto/configservice/__init__.py b/ext/boto/configservice/__init__.py new file mode 100644 index 0000000000..62b3ea2420 --- /dev/null +++ b/ext/boto/configservice/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
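(The py2/py3 shims in compat.py above are easiest to see side by side; the snippet below behaves identically on both interpreters.)

from boto.compat import json, parse_qs_safe, unquote_str

assert unquote_str('caf%C3%A9+menu') == u'caf\xe9 menu'
params = parse_qs_safe(u'name=caf%C3%A9&empty=', keep_blank_values=True)
assert params[u'name'] == [u'caf\xe9']
assert json.loads('{"a": 1}')['a'] == 1  # simplejson when available, else stdlib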
+# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Config service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.configservice.layer1 import ConfigServiceConnection + return get_regions('configservice', connection_cls=ConfigServiceConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.configservice.layer1 import ConfigServiceConnection + return connect('configservice', region_name, + connection_cls=ConfigServiceConnection, **kw_params) diff --git a/ext/boto/configservice/exceptions.py b/ext/boto/configservice/exceptions.py new file mode 100644 index 0000000000..58aa550f9c --- /dev/null +++ b/ext/boto/configservice/exceptions.py @@ -0,0 +1,103 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
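(Like the Cognito Sync faults earlier in this patch, the exception classes added below are wired into the connection's _faults table so callers can catch a specific service error. An illustrative sketch, assuming configured AWS credentials; the recorder name is a deliberately bogus placeholder.)

from boto.configservice import connect_to_region
from boto.configservice.exceptions import NoSuchConfigurationRecorderException

conn = connect_to_region('us-east-1')
try:
    conn.describe_configuration_recorders(
        configuration_recorder_names=['no-such-recorder'])
except NoSuchConfigurationRecorderException as err:
    print(err.status, err.reason)  # standard BotoServerError attributes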
+#
+
+from boto.exception import BotoServerError
+
+
+class InvalidLimitException(BotoServerError):
+    pass
+
+
+class NoSuchBucketException(BotoServerError):
+    pass
+
+
+class InvalidSNSTopicARNException(BotoServerError):
+    pass
+
+
+class ResourceNotDiscoveredException(BotoServerError):
+    pass
+
+
+class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
+    pass
+
+
+class LastDeliveryChannelDeleteFailedException(BotoServerError):
+    pass
+
+
+class InsufficientDeliveryPolicyException(BotoServerError):
+    pass
+
+
+class InvalidRoleException(BotoServerError):
+    pass
+
+
+class InvalidTimeRangeException(BotoServerError):
+    pass
+
+
+class NoSuchDeliveryChannelException(BotoServerError):
+    pass
+
+
+class NoSuchConfigurationRecorderException(BotoServerError):
+    pass
+
+
+class InvalidS3KeyPrefixException(BotoServerError):
+    pass
+
+
+class InvalidDeliveryChannelNameException(BotoServerError):
+    pass
+
+
+class NoRunningConfigurationRecorderException(BotoServerError):
+    pass
+
+
+class ValidationException(BotoServerError):
+    pass
+
+
+class NoAvailableConfigurationRecorderException(BotoServerError):
+    pass
+
+
+class InvalidNextTokenException(BotoServerError):
+    pass
+
+
+class InvalidConfigurationRecorderNameException(BotoServerError):
+    pass
+
+
+class NoAvailableDeliveryChannelException(BotoServerError):
+    pass
+
+
+class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
+    pass
diff --git a/ext/boto/configservice/layer1.py b/ext/boto/configservice/layer1.py
new file mode 100644
index 0000000000..d768b66ce8
--- /dev/null
+++ b/ext/boto/configservice/layer1.py
@@ -0,0 +1,385 @@
+# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.configservice import exceptions
+
+
+class ConfigServiceConnection(AWSQueryConnection):
+    """
+    AWS Config
+    AWS Config provides a way to keep track of the configurations of
+    all the AWS resources associated with your AWS account. You can
+    use AWS Config to get the current and historical configurations of
+    each AWS resource and also to get information about the
+    relationship between the resources. An AWS resource can be an
+    Amazon Elastic Compute Cloud (Amazon EC2) instance, an Elastic
+    Block Store (EBS) volume, an Elastic Network Interface (ENI), or
+    a security group.
For a complete list of resources currently supported by AWS + Config, see `Supported AWS Resources`_. + + You can access and manage AWS Config through the AWS Management + Console, the AWS Command Line Interface (AWS CLI), the AWS Config + API, or the AWS SDKs for AWS Config + + This reference guide contains documentation for the AWS Config API + and the AWS CLI commands that you can use to manage AWS Config. + + The AWS Config API uses the Signature Version 4 protocol for + signing requests. For more information about how to sign a request + with this protocol, see `Signature Version 4 Signing Process`_. + + For detailed information about AWS Config features and their + associated actions or commands, as well as how to work with AWS + Management Console, see `What Is AWS Config?`_ in the AWS Config + Developer Guide . + """ + APIVersion = "2014-11-12" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "config.us-east-1.amazonaws.com" + ServiceName = "ConfigService" + TargetPrefix = "StarlingDoveService" + ResponseError = JSONResponseError + + _faults = { + "InvalidLimitException": exceptions.InvalidLimitException, + "NoSuchBucketException": exceptions.NoSuchBucketException, + "InvalidSNSTopicARNException": exceptions.InvalidSNSTopicARNException, + "ResourceNotDiscoveredException": exceptions.ResourceNotDiscoveredException, + "MaxNumberOfDeliveryChannelsExceededException": exceptions.MaxNumberOfDeliveryChannelsExceededException, + "LastDeliveryChannelDeleteFailedException": exceptions.LastDeliveryChannelDeleteFailedException, + "InsufficientDeliveryPolicyException": exceptions.InsufficientDeliveryPolicyException, + "InvalidRoleException": exceptions.InvalidRoleException, + "InvalidTimeRangeException": exceptions.InvalidTimeRangeException, + "NoSuchDeliveryChannelException": exceptions.NoSuchDeliveryChannelException, + "NoSuchConfigurationRecorderException": exceptions.NoSuchConfigurationRecorderException, + "InvalidS3KeyPrefixException": exceptions.InvalidS3KeyPrefixException, + "InvalidDeliveryChannelNameException": exceptions.InvalidDeliveryChannelNameException, + "NoRunningConfigurationRecorderException": exceptions.NoRunningConfigurationRecorderException, + "ValidationException": exceptions.ValidationException, + "NoAvailableConfigurationRecorderException": exceptions.NoAvailableConfigurationRecorderException, + "InvalidNextTokenException": exceptions.InvalidNextTokenException, + "InvalidConfigurationRecorderNameException": exceptions.InvalidConfigurationRecorderNameException, + "NoAvailableDeliveryChannelException": exceptions.NoAvailableDeliveryChannelException, + "MaxNumberOfConfigurationRecordersExceededException": exceptions.MaxNumberOfConfigurationRecordersExceededException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(ConfigServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def delete_delivery_channel(self, delivery_channel_name): + """ + Deletes the specified delivery channel. + + The delivery channel cannot be deleted if it is the only + delivery channel and the configuration recorder is still + running. To delete the delivery channel, stop the running + configuration recorder using the StopConfigurationRecorder + action. 
+
+        :type delivery_channel_name: string
+        :param delivery_channel_name: The name of the delivery channel to
+            delete.
+
+        """
+        params = {'DeliveryChannelName': delivery_channel_name, }
+        return self.make_request(action='DeleteDeliveryChannel',
+                                 body=json.dumps(params))
+
+    def deliver_config_snapshot(self, delivery_channel_name):
+        """
+        Schedules delivery of a configuration snapshot to the Amazon
+        S3 bucket in the specified delivery channel. After the
+        delivery has started, AWS Config sends the following
+        notifications using an Amazon SNS topic that you have
+        specified.
+
+
+        + Notification of starting the delivery.
+        + Notification of delivery completed, if the delivery was
+          successfully completed.
+        + Notification of delivery failure, if the delivery failed to
+          complete.
+
+        :type delivery_channel_name: string
+        :param delivery_channel_name: The name of the delivery channel through
+            which the snapshot is delivered.
+
+        """
+        params = {'deliveryChannelName': delivery_channel_name, }
+        return self.make_request(action='DeliverConfigSnapshot',
+                                 body=json.dumps(params))
+
+    def describe_configuration_recorder_status(self,
+                                               configuration_recorder_names=None):
+        """
+        Returns the current status of the specified configuration
+        recorder. If a configuration recorder is not specified, this
+        action returns the status of all configuration recorders
+        associated with the account.
+
+        :type configuration_recorder_names: list
+        :param configuration_recorder_names: The name(s) of the configuration
+            recorder. If the name is not specified, the action returns the
+            current status of all the configuration recorders associated with
+            the account.
+
+        """
+        params = {}
+        if configuration_recorder_names is not None:
+            params['ConfigurationRecorderNames'] = configuration_recorder_names
+        return self.make_request(action='DescribeConfigurationRecorderStatus',
+                                 body=json.dumps(params))
+
+    def describe_configuration_recorders(self,
+                                         configuration_recorder_names=None):
+        """
+        Returns the name of one or more specified configuration
+        recorders. If the recorder name is not specified, this action
+        returns the names of all the configuration recorders
+        associated with the account.
+
+        :type configuration_recorder_names: list
+        :param configuration_recorder_names: A list of configuration recorder
+            names.
+
+        """
+        params = {}
+        if configuration_recorder_names is not None:
+            params['ConfigurationRecorderNames'] = configuration_recorder_names
+        return self.make_request(action='DescribeConfigurationRecorders',
+                                 body=json.dumps(params))
+
+    def describe_delivery_channel_status(self, delivery_channel_names=None):
+        """
+        Returns the current status of the specified delivery channel.
+        If a delivery channel is not specified, this action returns
+        the current status of all delivery channels associated with
+        the account.
+
+        :type delivery_channel_names: list
+        :param delivery_channel_names: A list of delivery channel names.
+
+        """
+        params = {}
+        if delivery_channel_names is not None:
+            params['DeliveryChannelNames'] = delivery_channel_names
+        return self.make_request(action='DescribeDeliveryChannelStatus',
+                                 body=json.dumps(params))
+
+    def describe_delivery_channels(self, delivery_channel_names=None):
+        """
+        Returns details about the specified delivery channel. If a
+        delivery channel is not specified, this action returns the
+        details of all delivery channels associated with the account.
+
+        :type delivery_channel_names: list
+        :param delivery_channel_names: A list of delivery channel names.
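(Putting the channel and recorder methods together: a minimal end-to-end setup might look like the sketch below, using put_configuration_recorder, put_delivery_channel and start_configuration_recorder defined further down in this file, and reusing the conn from the exceptions sketch earlier. The ARNs and bucket name are placeholders.)

conn.put_configuration_recorder({
    'name': 'default',
    'roleARN': 'arn:aws:iam::123456789012:role/config-role'})     # placeholder
conn.put_delivery_channel({
    'name': 'default',
    's3BucketName': 'my-config-bucket',                           # placeholder
    'snsTopicARN': 'arn:aws:sns:us-east-1:123456789012:config'})  # placeholder
conn.start_configuration_recorder('default')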
+ + """ + params = {} + if delivery_channel_names is not None: + params['DeliveryChannelNames'] = delivery_channel_names + return self.make_request(action='DescribeDeliveryChannels', + body=json.dumps(params)) + + def get_resource_config_history(self, resource_type, resource_id, + later_time=None, earlier_time=None, + chronological_order=None, limit=None, + next_token=None): + """ + Returns a list of configuration items for the specified + resource. The list contains details about each state of the + resource during the specified time interval. You can specify a + `limit` on the number of results returned on the page. If a + limit is specified, a `nextToken` is returned as part of the + result that you can use to continue this request. + + :type resource_type: string + :param resource_type: The resource type. + + :type resource_id: string + :param resource_id: The ID of the resource (for example., `sg-xxxxxx`). + + :type later_time: timestamp + :param later_time: The time stamp that indicates a later time. If not + specified, current time is taken. + + :type earlier_time: timestamp + :param earlier_time: The time stamp that indicates an earlier time. If + not specified, the action returns paginated results that contain + configuration items that start from when the first configuration + item was recorded. + + :type chronological_order: string + :param chronological_order: The chronological order for configuration + items listed. By default the results are listed in reverse + chronological order. + + :type limit: integer + :param limit: The maximum number of configuration items returned in + each page. The default is 10. You cannot specify a limit greater + than 100. + + :type next_token: string + :param next_token: An optional parameter used for pagination of the + results. + + """ + params = { + 'resourceType': resource_type, + 'resourceId': resource_id, + } + if later_time is not None: + params['laterTime'] = later_time + if earlier_time is not None: + params['earlierTime'] = earlier_time + if chronological_order is not None: + params['chronologicalOrder'] = chronological_order + if limit is not None: + params['limit'] = limit + if next_token is not None: + params['nextToken'] = next_token + return self.make_request(action='GetResourceConfigHistory', + body=json.dumps(params)) + + def put_configuration_recorder(self, configuration_recorder): + """ + Creates a new configuration recorder to record the resource + configurations. + + You can use this action to change the role ( `roleARN`) of an + existing recorder. To change the role, call the action on the + existing configuration recorder and specify a role. + + :type configuration_recorder: dict + :param configuration_recorder: The configuration recorder object that + records each configuration change made to the resources. The + format should follow: + + {'name': 'myrecorder', + 'roleARN': 'arn:aws:iam::123456789012:role/trusted-aws-config'} + + """ + params = {'ConfigurationRecorder': configuration_recorder, } + return self.make_request(action='PutConfigurationRecorder', + body=json.dumps(params)) + + def put_delivery_channel(self, delivery_channel): + """ + Creates a new delivery channel object to deliver the + configuration information to an Amazon S3 bucket, and to an + Amazon SNS topic. + + You can use this action to change the Amazon S3 bucket or an + Amazon SNS topic of the existing delivery channel. 
To change + the Amazon S3 bucket or an Amazon SNS topic, call this action + and specify the changed values for the S3 bucket and the SNS + topic. If you specify a different value for either the S3 + bucket or the SNS topic, this action will keep the existing + value for the parameter that is not changed. + + :type delivery_channel: dict + :param delivery_channel: The configuration delivery channel object that + delivers the configuration information to an Amazon S3 bucket, and + to an Amazon SNS topic. + + """ + params = {'DeliveryChannel': delivery_channel, } + return self.make_request(action='PutDeliveryChannel', + body=json.dumps(params)) + + def start_configuration_recorder(self, configuration_recorder_name): + """ + Starts recording configurations of all the resources + associated with the account. + + You must have created at least one delivery channel to + successfully start the configuration recorder. + + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StartConfigurationRecorder', + body=json.dumps(params)) + + def stop_configuration_recorder(self, configuration_recorder_name): + """ + Stops recording configurations of all the resources associated + with the account. + + :type configuration_recorder_name: string + :param configuration_recorder_name: The name of the recorder object + that records each configuration change made to the resources. + + """ + params = { + 'ConfigurationRecorderName': configuration_recorder_name, + } + return self.make_request(action='StopConfigurationRecorder', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/connection.py b/ext/boto/connection.py new file mode 100644 index 0000000000..2fef44872f --- /dev/null +++ b/ext/boto/connection.py @@ -0,0 +1,1227 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010 Google +# Copyright (c) 2008 rPath, Inc. +# Copyright (c) 2009 The Echo Nest Corporation +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# All rights reserved. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+"""
+Handles basic connections to AWS
+"""
+from datetime import datetime
+import errno
+import os
+import random
+import re
+import socket
+import sys
+import time
+import xml.sax
+import copy
+
+from boto import auth
+from boto import auth_handler
+import boto
+import boto.utils
+import boto.handler
+import boto.cacerts
+
+from boto import config, UserAgent
+from boto.compat import six, http_client, urlparse, quote, encodebytes
+from boto.exception import AWSConnectionError
+from boto.exception import BotoClientError
+from boto.exception import BotoServerError
+from boto.exception import PleaseRetryException
+from boto.provider import Provider
+from boto.resultset import ResultSet
+
+HAVE_HTTPS_CONNECTION = False
+try:
+    import ssl
+    from boto import https_connection
+    # Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
+    if hasattr(ssl, 'SSLError'):
+        HAVE_HTTPS_CONNECTION = True
+except ImportError:
+    pass
+
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+ON_APP_ENGINE = all(key in os.environ for key in (
+    'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
+
+PORTS_BY_SECURITY = {True: 443,
+                     False: 80}
+
+DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__)), "cacerts.txt")
+
+
+class HostConnectionPool(object):
+
+    """
+    A pool of connections for one remote (host,port,is_secure).
+
+    When connections are added to the pool, they are put into a
+    pending queue. The _mexe method returns connections to the pool
+    before the response body has been read, so the connections aren't
+    ready to send another request yet. They stay in the pending queue
+    until they are ready for another request, at which point they are
+    returned to the pool of ready connections.
+
+    The pool of ready connections is an ordered list of
+    (connection,time) pairs, where the time is the time the connection
+    was returned from _mexe. After a certain period of time,
+    connections are considered stale, and discarded rather than being
+    reused. This saves having to wait for the connection to time out
+    if AWS has decided to close it on the other end because of
+    inactivity.
+
+    Thread Safety:
+
+        This class is used only from ConnectionPool while its mutex
+        is held.
+    """
+
+    def __init__(self):
+        self.queue = []
+
+    def size(self):
+        """
+        Returns the number of connections in the pool for this host.
+        Some of the connections may still be in use, and may not be
+        ready to be returned by get().
+        """
+        return len(self.queue)
+
+    def put(self, conn):
+        """
+        Adds a connection to the pool, along with the time it was
+        added.
+        """
+        self.queue.append((conn, time.time()))
+
+    def get(self):
+        """
+        Returns the next connection in this pool that is ready to be
+        reused. Returns None if there aren't any.
+        """
+        # Discard ready connections that are too old.
+        self.clean()
+
+        # Return the first connection that is ready, and remove it
+        # from the queue. Connections that aren't ready are returned
+        # to the end of the queue with an updated time, on the
+        # assumption that somebody is actively reading the response.
+        for _ in range(len(self.queue)):
+            (conn, _) = self.queue.pop(0)
+            if self._conn_ready(conn):
+                return conn
+            else:
+                self.put(conn)
+        return None
+
+    def _conn_ready(self, conn):
+        """
+        There is a nice state diagram at the top of http_client.py. It
+        indicates that once the response headers have been read (which
+        _mexe does before adding the connection to the pool), a
+        response is attached to the connection, and it stays there
+        until it's done reading. This isn't entirely true: even after
+        the client is done reading, the response may be closed, but
+        not removed from the connection yet.
+
+        This is ugly, reading a private instance variable, but the
+        state we care about isn't available in any public methods.
+        """
+        if ON_APP_ENGINE:
+            # Google AppEngine implementation of HTTPConnection doesn't contain
+            # _HTTPConnection__response attribute. Moreover, it's not possible
+            # to determine if given connection is ready. Reusing connections
+            # simply doesn't make sense with App Engine urlfetch service.
+            return False
+        else:
+            response = getattr(conn, '_HTTPConnection__response', None)
+            return (response is None) or response.isclosed()
+
+    def clean(self):
+        """
+        Get rid of stale connections.
+        """
+        # Note that we do not close the connection here -- somebody
+        # may still be reading from it.
+        while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
+            self.queue.pop(0)
+
+    def _pair_stale(self, pair):
+        """
+        Returns true if the (connection,time) pair is too old to be
+        used.
+        """
+        (_conn, return_time) = pair
+        now = time.time()
+        return return_time + ConnectionPool.STALE_DURATION < now
+
+
+class ConnectionPool(object):
+
+    """
+    A connection pool that expires connections after a fixed period of
+    time. This saves time spent waiting for a connection that AWS has
+    timed out on the other end.
+
+    This class is thread-safe.
+    """
+
+    #
+    # The amount of time between calls to clean.
+    #
+
+    CLEAN_INTERVAL = 5.0
+
+    #
+    # How long before a connection becomes "stale" and won't be reused
+    # again. The intention is that this time is less than the timeout
+    # period that AWS uses, so we'll never try to reuse a connection
+    # and find that AWS is timing it out.
+    #
+    # Experimentation in July 2011 shows that AWS starts timing things
+    # out after three minutes. The 60 seconds here is conservative so
+    # we should never hit that 3-minute timeout.
+    #
+
+    STALE_DURATION = 60.0
+
+    def __init__(self):
+        # Mapping from (host,port,is_secure) to HostConnectionPool.
+        # If a pool becomes empty, it is removed.
+        self.host_to_pool = {}
+        # The last time the pool was cleaned.
+        self.last_clean_time = 0.0
+        self.mutex = threading.Lock()
+        ConnectionPool.STALE_DURATION = \
+            config.getfloat('Boto', 'connection_stale_duration',
+                            ConnectionPool.STALE_DURATION)
+
+    def __getstate__(self):
+        pickled_dict = copy.copy(self.__dict__)
+        pickled_dict['host_to_pool'] = {}
+        del pickled_dict['mutex']
+        return pickled_dict
+
+    def __setstate__(self, dct):
+        self.__init__()
+
+    def size(self):
+        """
+        Returns the number of connections in the pool.
+        """
+        return sum(pool.size() for pool in self.host_to_pool.values())
+
+    def get_http_connection(self, host, port, is_secure):
+        """
+        Gets a connection from the pool for the named host. Returns
+        None if there is no connection that can be reused. It's the caller's
+        responsibility to call close() on the connection when it's no longer
+        needed.
+        """
+        self.clean()
+        with self.mutex:
+            key = (host, port, is_secure)
+            if key not in self.host_to_pool:
+                return None
+            return self.host_to_pool[key].get()
+
+    def put_http_connection(self, host, port, is_secure, conn):
+        """
+        Adds a connection to the pool of connections that can be
+        reused for the named host.
+        """
+        with self.mutex:
+            key = (host, port, is_secure)
+            if key not in self.host_to_pool:
+                self.host_to_pool[key] = HostConnectionPool()
+            self.host_to_pool[key].put(conn)
+
+    def clean(self):
+        """
+        Clean up the stale connections in all of the pools, and then
+        get rid of empty pools. Pools clean themselves every time a
+        connection is fetched; this cleaning takes care of pools that
+        aren't being used any more, so nothing is being gotten from
+        them.
+        """
+        with self.mutex:
+            now = time.time()
+            if self.last_clean_time + self.CLEAN_INTERVAL < now:
+                to_remove = []
+                for (host, pool) in self.host_to_pool.items():
+                    pool.clean()
+                    if pool.size() == 0:
+                        to_remove.append(host)
+                for host in to_remove:
+                    del self.host_to_pool[host]
+                self.last_clean_time = now
+
+
+class HTTPRequest(object):
+
+    def __init__(self, method, protocol, host, port, path, auth_path,
+                 params, headers, body):
+        """Represents an HTTP request.
+
+        :type method: string
+        :param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
+
+        :type protocol: string
+        :param protocol: The http protocol used, 'http' or 'https'.
+
+        :type host: string
+        :param host: Host to which the request is addressed. eg. abc.com
+
+        :type port: int
+        :param port: port on which the request is being sent. Zero means unset,
+            in which case default port will be chosen.
+
+        :type path: string
+        :param path: URL path that is being accessed.
+
+        :type auth_path: string
+        :param auth_path: The part of the URL path used when creating the
+            authentication string.
+
+        :type params: dict
+        :param params: HTTP url query parameters, with key as name of
+            the param, and value as value of param.
+
+        :type headers: dict
+        :param headers: HTTP headers, with key as name of the header and value
+            as value of header.
+
+        :type body: string
+        :param body: Body of the HTTP request. If not present, will be None or
+            empty string ('').
+        """
+        self.method = method
+        self.protocol = protocol
+        self.host = host
+        self.port = port
+        self.path = path
+        if auth_path is None:
+            auth_path = path
+        self.auth_path = auth_path
+        self.params = params
+        # chunked Transfer-Encoding should act only on PUT request.
+        if headers and 'Transfer-Encoding' in headers and \
+                headers['Transfer-Encoding'] == 'chunked' and \
+                self.method != 'PUT':
+            self.headers = headers.copy()
+            del self.headers['Transfer-Encoding']
+        else:
+            self.headers = headers
+        self.body = body
+
+    def __str__(self):
+        return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
+                 'params(%s) headers(%s) body(%s)') % (self.method,
+                self.protocol, self.host, self.port, self.path, self.params,
+                self.headers, self.body))
+
+    def authorize(self, connection, **kwargs):
+        if not getattr(self, '_headers_quoted', False):
+            for key in self.headers:
+                val = self.headers[key]
+                if isinstance(val, six.text_type):
+                    safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~ '
+                    self.headers[key] = quote(val.encode('utf-8'), safe)
+            setattr(self, '_headers_quoted', True)
+
+        self.headers['User-Agent'] = UserAgent
+
+        connection._auth_handler.add_auth(self, **kwargs)
+
+        # I'm not sure if this is still needed, now that add_auth is
+        # setting the content-length for POST requests.
+        if 'Content-Length' not in self.headers:
+            if 'Transfer-Encoding' not in self.headers or \
+                    self.headers['Transfer-Encoding'] != 'chunked':
+                self.headers['Content-Length'] = str(len(self.body))
+
+
+class HTTPResponse(http_client.HTTPResponse):
+
+    def __init__(self, *args, **kwargs):
+        http_client.HTTPResponse.__init__(self, *args, **kwargs)
+        self._cached_response = ''
+
+    def read(self, amt=None):
+        """Read the response.
+
+        This method does not have the same behavior as
+        http_client.HTTPResponse.read. Instead, if this method is called with
+        no ``amt`` arg, then the response body will be cached. Subsequent
+        calls to ``read()`` with no args **will return the cached response**.
+
+        """
+        if amt is None:
+            # The reason for doing this is that many places in boto call
+            # response.read() and expect to get the response body that they
+            # can then process. To make sure this always works as they expect
+            # we're caching the response so that multiple calls to read()
+            # will return the full body. Note that this behavior only
+            # happens if the amt arg is not specified.
+            if not self._cached_response:
+                self._cached_response = http_client.HTTPResponse.read(self)
+            return self._cached_response
+        else:
+            return http_client.HTTPResponse.read(self, amt)
+
+
+class AWSAuthConnection(object):
+    def __init__(self, host, aws_access_key_id=None,
+                 aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, path='/',
+                 provider='aws', security_token=None,
+                 suppress_consec_slashes=True,
+                 validate_certs=True, profile_name=None):
+        """
+        :type host: str
+        :param host: The host to make the connection to
+
+        :keyword str aws_access_key_id: Your AWS Access Key ID (provided by
+            Amazon). If none is specified, the value in your
+            ``AWS_ACCESS_KEY_ID`` environmental variable is used.
+        :keyword str aws_secret_access_key: Your AWS Secret Access Key
+            (provided by Amazon). If none is specified, the value in your
+            ``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
+        :keyword str security_token: The security token associated with
+            temporary credentials issued by STS. Optional unless using
+            temporary credentials.
If none is specified, the environment + variable ``AWS_SECURITY_TOKEN`` is used if defined. + + :type is_secure: boolean + :param is_secure: Whether the connection is over SSL + + :type https_connection_factory: list or tuple + :param https_connection_factory: A pair of an HTTP connection + factory and the exceptions to catch. The factory should have + a similar interface to L{http_client.HTTPSConnection}. + + :param str proxy: Address/hostname for a proxy server + + :type proxy_port: int + :param proxy_port: The port to use when connecting over a proxy + + :type proxy_user: str + :param proxy_user: The username to connect with on the proxy + + :type proxy_pass: str + :param proxy_pass: The password to use when connection over a proxy. + + :type port: int + :param port: The port to use to connect + + :type suppress_consec_slashes: bool + :param suppress_consec_slashes: If provided, controls whether + consecutive slashes will be suppressed in key paths. + + :type validate_certs: bool + :param validate_certs: Controls whether SSL certificates + will be validated or not. Defaults to True. + + :type profile_name: str + :param profile_name: Override usual Credentials section in config + file to use a named set of keys instead. + """ + self.suppress_consec_slashes = suppress_consec_slashes + self.num_retries = 6 + # Override passed-in is_secure setting if value was defined in config. + if config.has_option('Boto', 'is_secure'): + is_secure = config.getboolean('Boto', 'is_secure') + self.is_secure = is_secure + # Whether or not to validate server certificates. + # The default is now to validate certificates. This can be + # overridden in the boto config file are by passing an + # explicit validate_certs parameter to the class constructor. + self.https_validate_certificates = config.getbool( + 'Boto', 'https_validate_certificates', + validate_certs) + if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION: + raise BotoClientError( + "SSL server certificate validation is enabled in boto " + "configuration, but Python dependencies required to " + "support this feature are not available. Certificate " + "validation is only supported when running under Python " + "2.6 or later.") + certs_file = config.get_value( + 'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE) + if certs_file == 'system': + certs_file = None + self.ca_certificates_file = certs_file + if port: + self.port = port + else: + self.port = PORTS_BY_SECURITY[is_secure] + + self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass) + # define exceptions from http_client that we want to catch and retry + self.http_exceptions = (http_client.HTTPException, socket.error, + socket.gaierror, http_client.BadStatusLine) + # define subclasses of the above that are not retryable. 
+ self.http_unretryable_exceptions = [] + if HAVE_HTTPS_CONNECTION: + self.http_unretryable_exceptions.append( + https_connection.InvalidCertificateException) + + # define values in socket exceptions we don't want to catch + self.socket_exception_values = (errno.EINTR,) + if https_connection_factory is not None: + self.https_connection_factory = https_connection_factory[0] + self.http_exceptions += https_connection_factory[1] + else: + self.https_connection_factory = None + if (is_secure): + self.protocol = 'https' + else: + self.protocol = 'http' + self.host = host + self.path = path + # if the value passed in for debug + if not isinstance(debug, six.integer_types): + debug = 0 + self.debug = config.getint('Boto', 'debug', debug) + self.host_header = None + + # Timeout used to tell http_client how long to wait for socket timeouts. + # Default is to leave timeout unchanged, which will in turn result in + # the socket's default global timeout being used. To specify a + # timeout, set http_socket_timeout in Boto config. Regardless, + # timeouts will only be applied if Python is 2.6 or greater. + self.http_connection_kwargs = {} + if (sys.version_info[0], sys.version_info[1]) >= (2, 6): + # If timeout isn't defined in boto config file, use 70 second + # default as recommended by + # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html + self.http_connection_kwargs['timeout'] = config.getint( + 'Boto', 'http_socket_timeout', 70) + + if isinstance(provider, Provider): + # Allow overriding Provider + self.provider = provider + else: + self._provider_type = provider + self.provider = Provider(self._provider_type, + aws_access_key_id, + aws_secret_access_key, + security_token, + profile_name) + + # Allow config file to override default host, port, and host header. + if self.provider.host: + self.host = self.provider.host + if self.provider.port: + self.port = self.provider.port + if self.provider.host_header: + self.host_header = self.provider.host_header + + self._pool = ConnectionPool() + self._connection = (self.host, self.port, self.is_secure) + self._last_rs = None + self._auth_handler = auth.get_auth_handler( + host, config, self.provider, self._required_auth_capability()) + if getattr(self, 'AuthServiceName', None) is not None: + self.auth_service_name = self.AuthServiceName + self.request_hook = None + + def __repr__(self): + return '%s:%s' % (self.__class__.__name__, self.host) + + def _required_auth_capability(self): + return [] + + def _get_auth_service_name(self): + return getattr(self._auth_handler, 'service_name') + + # For Sigv4, the auth_service_name/auth_region_name properties allow + # the service_name/region_name to be explicitly set instead of being + # derived from the endpoint url. 
+    def _set_auth_service_name(self, value):
+        self._auth_handler.service_name = value
+    auth_service_name = property(_get_auth_service_name, _set_auth_service_name)
+
+    def _get_auth_region_name(self):
+        return getattr(self._auth_handler, 'region_name')
+
+    def _set_auth_region_name(self, value):
+        self._auth_handler.region_name = value
+    auth_region_name = property(_get_auth_region_name, _set_auth_region_name)
+
+    def connection(self):
+        return self.get_http_connection(*self._connection)
+    connection = property(connection)
+
+    def aws_access_key_id(self):
+        return self.provider.access_key
+    aws_access_key_id = property(aws_access_key_id)
+    gs_access_key_id = aws_access_key_id
+    access_key = aws_access_key_id
+
+    def aws_secret_access_key(self):
+        return self.provider.secret_key
+    aws_secret_access_key = property(aws_secret_access_key)
+    gs_secret_access_key = aws_secret_access_key
+    secret_key = aws_secret_access_key
+
+    def profile_name(self):
+        return self.provider.profile_name
+    profile_name = property(profile_name)
+
+    def get_path(self, path='/'):
+        # The default behavior is to suppress consecutive slashes for reasons
+        # discussed at
+        # https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
+        # You can override that behavior with the suppress_consec_slashes param.
+        if not self.suppress_consec_slashes:
+            return self.path + re.sub('^(/*)/', "\\1", path)
+        pos = path.find('?')
+        if pos >= 0:
+            params = path[pos:]
+            path = path[:pos]
+        else:
+            params = None
+        if path[-1] == '/':
+            need_trailing = True
+        else:
+            need_trailing = False
+        path_elements = self.path.split('/')
+        path_elements.extend(path.split('/'))
+        path_elements = [p for p in path_elements if p]
+        path = '/' + '/'.join(path_elements)
+        if path[-1] != '/' and need_trailing:
+            path += '/'
+        if params:
+            path = path + params
+        return path
+
+    def server_name(self, port=None):
+        if not port:
+            port = self.port
+        if port == 80:
+            signature_host = self.host
+        else:
+            # This unfortunate little hack can be attributed to
+            # a difference in the 2.6 version of http_client. In old
+            # versions, it would append ":443" to the hostname sent
+            # in the Host header and so we needed to make sure we
+            # did the same when calculating the V2 signature. In 2.6
+            # (and higher!)
+            # it no longer does that. Hence, this kludge.
+            if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
+                    sys.version[:3] in ('2.6', '2.7')) and port == 443:
+                signature_host = self.host
+            else:
+                signature_host = '%s:%d' % (self.host, port)
+        return signature_host
+
+    def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
+        self.proxy = proxy
+        self.proxy_port = proxy_port
+        self.proxy_user = proxy_user
+        self.proxy_pass = proxy_pass
+        if 'http_proxy' in os.environ and not self.proxy:
+            pattern = re.compile(
+                '(?:http://)?'
+                '(?:(?P<user>[\w\-\.]+):(?P<pass>.*)@)?'
+                '(?P<host>[\w\-\.]+)'
+                '(?::(?P<port>\d+))?'
+            )
+            match = pattern.match(os.environ['http_proxy'])
+            if match:
+                self.proxy = match.group('host')
+                self.proxy_port = match.group('port')
+                self.proxy_user = match.group('user')
+                self.proxy_pass = match.group('pass')
+        else:
+            if not self.proxy:
+                self.proxy = config.get_value('Boto', 'proxy', None)
+            if not self.proxy_port:
+                self.proxy_port = config.get_value('Boto', 'proxy_port', None)
+            if not self.proxy_user:
+                self.proxy_user = config.get_value('Boto', 'proxy_user', None)
+            if not self.proxy_pass:
+                self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
+
+        if not self.proxy_port and self.proxy:
+            print("http_proxy environment variable does not specify "
+                  "a port, using default")
+            self.proxy_port = self.port
+
+        self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
+        self.use_proxy = (self.proxy is not None)
+
+    def get_http_connection(self, host, port, is_secure):
+        conn = self._pool.get_http_connection(host, port, is_secure)
+        if conn is not None:
+            return conn
+        else:
+            return self.new_http_connection(host, port, is_secure)
+
+    def skip_proxy(self, host):
+        if not self.no_proxy:
+            return False
+
+        if self.no_proxy == "*":
+            return True
+
+        hostonly = host.split(':')[0]
+
+        for name in self.no_proxy.split(','):
+            if name and (hostonly.endswith(name) or host.endswith(name)):
+                return True
+
+        return False
+
+    def new_http_connection(self, host, port, is_secure):
+        if host is None:
+            host = self.server_name()
+
+        # Make sure the host is really just the host, not including
+        # the port number
+        host = boto.utils.parse_host(host)
+
+        http_connection_kwargs = self.http_connection_kwargs.copy()
+
+        # Connection factories below expect a port keyword argument
+        http_connection_kwargs['port'] = port
+
+        # Override host with proxy settings if needed
+        if self.use_proxy and not is_secure and \
+                not self.skip_proxy(host):
+            host = self.proxy
+            http_connection_kwargs['port'] = int(self.proxy_port)
+
+        if is_secure:
+            boto.log.debug(
+                'establishing HTTPS connection: host=%s, kwargs=%s',
+                host, http_connection_kwargs)
+            if self.use_proxy and not self.skip_proxy(host):
+                connection = self.proxy_ssl(host, is_secure and 443 or 80)
+            elif self.https_connection_factory:
+                connection = self.https_connection_factory(host)
+            elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
+                connection = https_connection.CertValidatingHTTPSConnection(
+                    host, ca_certs=self.ca_certificates_file,
+                    **http_connection_kwargs)
+            else:
+                connection = http_client.HTTPSConnection(
+                    host, **http_connection_kwargs)
+        else:
+            boto.log.debug('establishing HTTP connection: kwargs=%s' %
+                           http_connection_kwargs)
+            if self.https_connection_factory:
+                # even though the factory says https, this is too handy
+                # to not allow overriding for http also.
+                connection = self.https_connection_factory(
+                    host, **http_connection_kwargs)
+            else:
+                connection = http_client.HTTPConnection(
+                    host, **http_connection_kwargs)
+        if self.debug > 1:
+            connection.set_debuglevel(self.debug)
+        # self.connection must be maintained for backwards-compatibility
+        # however, it must be dynamically pulled from the connection pool
+        # set a private variable which will enable that
+        if host.split(':')[0] == self.host and is_secure == self.is_secure:
+            self._connection = (host, port, is_secure)
+        # Set the response class of the http connection to use our custom
+        # class.
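+        # (boto's HTTPResponse subclass caches the body so read() can be
+        # called more than once, e.g. after the retry logic in _mexe has
+        # already consumed it)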
+        connection.response_class = HTTPResponse
+        return connection
+
+    def put_http_connection(self, host, port, is_secure, connection):
+        self._pool.put_http_connection(host, port, is_secure, connection)
+
+    def proxy_ssl(self, host=None, port=None):
+        if host and port:
+            host = '%s:%d' % (host, port)
+        else:
+            host = '%s:%d' % (self.host, self.port)
+        # It seems proper to use the timeout for the proxy connect too
+        timeout = self.http_connection_kwargs.get("timeout")
+        if timeout is not None:
+            sock = socket.create_connection((self.proxy,
+                                             int(self.proxy_port)), timeout)
+        else:
+            sock = socket.create_connection((self.proxy, int(self.proxy_port)))
+        boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
+        sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
+        sock.sendall("User-Agent: %s\r\n" % UserAgent)
+        if self.proxy_user and self.proxy_pass:
+            for k, v in self.get_proxy_auth_header().items():
+                sock.sendall("%s: %s\r\n" % (k, v))
+            # See discussion about this config option at
+            # https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
+            if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
+                sock.sendall("\r\n")
+        else:
+            sock.sendall("\r\n")
+        resp = http_client.HTTPResponse(sock, strict=True, debuglevel=self.debug)
+        resp.begin()
+
+        if resp.status != 200:
+            # Fake a socket error, use a code that makes it obvious it hasn't
+            # been generated by the socket library
+            raise socket.error(-71,
+                               "Error talking to HTTP proxy %s:%s: %s (%s)" %
+                               (self.proxy, self.proxy_port,
+                                resp.status, resp.reason))
+
+        # We can safely close the response, it duped the original socket
+        resp.close()
+
+        h = http_client.HTTPConnection(host)
+
+        if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
+            msg = "wrapping ssl socket for proxied connection; "
+            if self.ca_certificates_file:
+                msg += "CA certificate file=%s" % self.ca_certificates_file
+            else:
+                msg += "using system provided SSL certs"
+            boto.log.debug(msg)
+            key_file = self.http_connection_kwargs.get('key_file', None)
+            cert_file = self.http_connection_kwargs.get('cert_file', None)
+            sslSock = ssl.wrap_socket(sock, keyfile=key_file,
+                                      certfile=cert_file,
+                                      cert_reqs=ssl.CERT_REQUIRED,
+                                      ca_certs=self.ca_certificates_file)
+            cert = sslSock.getpeercert()
+            hostname = self.host.split(':', 1)[0]
+            if not https_connection.ValidateCertificateHostname(cert, hostname):
+                raise https_connection.InvalidCertificateException(
+                    hostname, cert, 'hostname mismatch')
+        else:
+            # Fallback for old Python without ssl.wrap_socket
+            if hasattr(http_client, 'ssl'):
+                sslSock = http_client.ssl.SSLSocket(sock)
+            else:
+                sslSock = socket.ssl(sock, None, None)
+                sslSock = http_client.FakeSocket(sock, sslSock)
+
+        # This is a bit unclean
+        h.sock = sslSock
+        return h
+
+    def prefix_proxy_to_path(self, path, host=None):
+        path = self.protocol + '://' + (host or self.server_name()) + path
+        return path
+
+    def get_proxy_auth_header(self):
+        auth = encodebytes(self.proxy_user + ':' + self.proxy_pass)
+        return {'Proxy-Authorization': 'Basic %s' % auth}
+
+    # For passing proxy information to other connection libraries, e.g. cloudsearch2
+    def get_proxy_url_with_auth(self):
+        if not self.use_proxy:
+            return None
+
+        if self.proxy_user or self.proxy_pass:
+            if self.proxy_pass:
+                login_info = '%s:%s@' % (self.proxy_user, self.proxy_pass)
+            else:
+                login_info = '%s@' % self.proxy_user
+        else:
+            login_info = ''
+
+        return 'http://%s%s:%s' % (login_info, self.proxy, str(self.proxy_port or self.port))
+
+    def set_host_header(self, request):
+        try:
+            request.headers['Host'] = \
+                self._auth_handler.host_header(self.host, request)
+        except AttributeError:
+            request.headers['Host'] = self.host.split(':', 1)[0]
+
+    def set_request_hook(self, hook):
+        self.request_hook = hook
+
+    def _mexe(self, request, sender=None, override_num_retries=None,
+              retry_handler=None):
+        """
+        mexe - Multi-execute inside a loop, retrying multiple times to handle
+        transient Internet errors by simply trying again.
+        Also handles redirects.
+
+        This code was inspired by the S3Utils classes posted to the boto-users
+        Google group by Larry Bates. Thanks!
+
+        """
+        boto.log.debug('Method: %s' % request.method)
+        boto.log.debug('Path: %s' % request.path)
+        boto.log.debug('Data: %s' % request.body)
+        boto.log.debug('Headers: %s' % request.headers)
+        boto.log.debug('Host: %s' % request.host)
+        boto.log.debug('Port: %s' % request.port)
+        boto.log.debug('Params: %s' % request.params)
+        response = None
+        body = None
+        ex = None
+        if override_num_retries is None:
+            num_retries = config.getint('Boto', 'num_retries', self.num_retries)
+        else:
+            num_retries = override_num_retries
+        i = 0
+        connection = self.get_http_connection(request.host, request.port,
+                                              self.is_secure)
+
+        # Convert body to bytes if needed
+        if not isinstance(request.body, bytes) and hasattr(request.body,
+                                                           'encode'):
+            request.body = request.body.encode('utf-8')
+
+        while i <= num_retries:
+            # Use binary exponential backoff to desynchronize client requests.
+            next_sleep = min(random.random() * (2 ** i),
+                             boto.config.get('Boto', 'max_retry_delay', 60))
+            try:
+                # we now re-sign each request before it is retried
+                boto.log.debug('Token: %s' % self.provider.security_token)
+                request.authorize(connection=self)
+                # Only force header for non-s3 connections, because s3 uses
+                # an older signing method + bucket resource URLs that include
+                # the port info. All others should now be up to date and
+                # not include the port.
+                if 's3' not in self._required_auth_capability():
+                    if not getattr(self, 'anon', False):
+                        if not request.headers.get('Host'):
+                            self.set_host_header(request)
+                boto.log.debug('Final headers: %s' % request.headers)
+                request.start_time = datetime.now()
+                if callable(sender):
+                    response = sender(connection, request.method, request.path,
+                                      request.body, request.headers)
+                else:
+                    connection.request(request.method, request.path,
+                                       request.body, request.headers)
+                    response = connection.getresponse()
+                boto.log.debug('Response headers: %s' % response.getheaders())
+                location = response.getheader('location')
+                # -- gross hack --
+                # http_client gets confused with chunked responses to HEAD requests
+                # so I have to fake it out
+                if request.method == 'HEAD' and getattr(response,
+                                                        'chunked', False):
+                    response.chunked = 0
+                if callable(retry_handler):
+                    status = retry_handler(response, i, next_sleep)
+                    if status:
+                        msg, i, next_sleep = status
+                        if msg:
+                            boto.log.debug(msg)
+                        time.sleep(next_sleep)
+                        continue
+                if response.status in [500, 502, 503, 504]:
+                    msg = 'Received %d response. ' % response.status
+                    msg += 'Retrying in %3.1f seconds' % next_sleep
+                    boto.log.debug(msg)
+                    body = response.read()
+                    if isinstance(body, bytes):
+                        body = body.decode('utf-8')
+                elif response.status < 300 or response.status >= 400 or \
+                        not location:
+                    # don't return connection to the pool if response contains
+                    # Connection:close header, because the connection has been
+                    # closed and default reconnect behavior may do something
+                    # different than new_http_connection. Also, it's probably
+                    # less efficient to try to reuse a closed connection.
+                    conn_header_value = response.getheader('connection')
+                    if conn_header_value == 'close':
+                        connection.close()
+                    else:
+                        self.put_http_connection(request.host, request.port,
+                                                 self.is_secure, connection)
+                    if self.request_hook is not None:
+                        self.request_hook.handle_request_data(request, response)
+                    return response
+                else:
+                    scheme, request.host, request.path, \
+                        params, query, fragment = urlparse(location)
+                    if query:
+                        request.path += '?' + query
+                    # urlparse can return both host and port in netloc, so if
+                    # that's the case we need to split them up properly
+                    if ':' in request.host:
+                        request.host, request.port = request.host.split(':', 1)
+                    msg = 'Redirecting: %s' % scheme + '://'
+                    msg += request.host + request.path
+                    boto.log.debug(msg)
+                    connection = self.get_http_connection(request.host,
+                                                          request.port,
+                                                          scheme == 'https')
+                    response = None
+                    continue
+            except PleaseRetryException as e:
+                boto.log.debug('encountered a retry exception: %s' % e)
+                connection = self.new_http_connection(request.host, request.port,
+                                                      self.is_secure)
+                response = e.response
+                ex = e
+            except self.http_exceptions as e:
+                for unretryable in self.http_unretryable_exceptions:
+                    if isinstance(e, unretryable):
+                        boto.log.debug(
+                            'encountered unretryable %s exception, re-raising' %
+                            e.__class__.__name__)
+                        raise
+                boto.log.debug('encountered %s exception, reconnecting' %
+                               e.__class__.__name__)
+                connection = self.new_http_connection(request.host, request.port,
+                                                      self.is_secure)
+                ex = e
+            time.sleep(next_sleep)
+            i += 1
+        # If we made it here, it's because we have exhausted our retries
+        # and still haven't succeeded. So, if we have a response object,
+        # use it to raise an exception.
+        # Otherwise, raise the exception that must have already happened.
+        if self.request_hook is not None:
+            self.request_hook.handle_request_data(request, response, error=True)
+        if response:
+            raise BotoServerError(response.status, response.reason, body)
+        elif ex:
+            raise ex
+        else:
+            msg = 'Please report this exception as a Boto Issue!'
+            raise BotoClientError(msg)
+
+    def build_base_http_request(self, method, path, auth_path,
+                                params=None, headers=None, data='', host=None):
+        path = self.get_path(path)
+        if auth_path is not None:
+            auth_path = self.get_path(auth_path)
+        if params is None:
+            params = {}
+        else:
+            params = params.copy()
+        if headers is None:
+            headers = {}
+        else:
+            headers = headers.copy()
+        if self.host_header and not boto.utils.find_matching_headers('host', headers):
+            headers['host'] = self.host_header
+        host = host or self.host
+        if self.use_proxy and not self.skip_proxy(host):
+            if not auth_path:
+                auth_path = path
+            path = self.prefix_proxy_to_path(path, host)
+            if self.proxy_user and self.proxy_pass and not self.is_secure:
+                # If is_secure, we don't have to set the proxy authentication
+                # header here, we did that in the CONNECT to the proxy.
+ headers.update(self.get_proxy_auth_header()) + return HTTPRequest(method, self.protocol, host, self.port, + path, auth_path, params, headers, data) + + def make_request(self, method, path, headers=None, data='', host=None, + auth_path=None, sender=None, override_num_retries=None, + params=None, retry_handler=None): + """Makes a request to the server, with stock multiple-retry logic.""" + if params is None: + params = {} + http_request = self.build_base_http_request(method, path, auth_path, + params, headers, data, host) + return self._mexe(http_request, sender, override_num_retries, + retry_handler=retry_handler) + + def close(self): + """(Optional) Close any open HTTP connections. This is non-destructive, + and making a new request will open a connection again.""" + + boto.log.debug('closing all HTTP connections') + self._connection = None # compat field + + +class AWSQueryConnection(AWSAuthConnection): + + APIVersion = '' + ResponseError = BotoServerError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=None, debug=0, + https_connection_factory=None, path='/', security_token=None, + validate_certs=True, profile_name=None, provider='aws'): + super(AWSQueryConnection, self).__init__( + host, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + debug, https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name, + provider=provider) + + def _required_auth_capability(self): + return [] + + def get_utf8_value(self, value): + return boto.utils.get_utf8_value(value) + + def make_request(self, action, params=None, path='/', verb='GET'): + http_request = self.build_base_http_request(verb, path, None, + params, {}, '', + self.host) + if action: + http_request.params['Action'] = action + if self.APIVersion: + http_request.params['Version'] = self.APIVersion + return self._mexe(http_request) + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for i in range(1, len(items) + 1): + params['%s.%d' % (label, i)] = items[i - 1] + + def build_complex_list_params(self, params, items, label, names): + """Serialize a list of structures. + + For example:: + + items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')] + label = 'ParamName.member' + names = ('One', 'Two', 'Three') + self.build_complex_list_params(params, items, label, names) + + would result in the params dict being updated with these params:: + + ParamName.member.1.One = foo + ParamName.member.1.Two = bar + ParamName.member.1.Three = baz + + ParamName.member.2.One = foo2 + ParamName.member.2.Two = bar2 + ParamName.member.2.Three = baz2 + + :type params: dict + :param params: The params dict. The complex list params + will be added to this dict. + + :type items: list of tuples + :param items: The list to serialize. + + :type label: string + :param label: The prefix to apply to the parameter. + + :type names: tuple of strings + :param names: The names associated with each tuple element. 
+ + """ + for i, item in enumerate(items, 1): + current_prefix = '%s.%s' % (label, i) + for key, value in zip(names, item): + full_key = '%s.%s' % (current_prefix, key) + params[full_key] = value + + # generics + + def get_list(self, action, params, markers, path='/', + parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + rs = ResultSet(markers) + h = boto.handler.XmlHandler(rs, parent) + if isinstance(body, six.text_type): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_object(self, action, params, cls, path='/', + parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + obj = cls(parent) + h = boto.handler.XmlHandler(obj, parent) + if isinstance(body, six.text_type): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return obj + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_status(self, action, params, path='/', parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self.ResponseError(response.status, response.reason, body) + elif response.status == 200: + rs = ResultSet() + h = boto.handler.XmlHandler(rs, parent) + xml.sax.parseString(body, h) + return rs.status + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) diff --git a/ext/boto/datapipeline/__init__.py b/ext/boto/datapipeline/__init__.py new file mode 100644 index 0000000000..a21a69794d --- /dev/null +++ b/ext/boto/datapipeline/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Datapipeline service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.datapipeline.layer1 import DataPipelineConnection + return get_regions('datapipeline', connection_cls=DataPipelineConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.datapipeline.layer1 import DataPipelineConnection + return connect('datapipeline', region_name, + connection_cls=DataPipelineConnection, **kw_params) diff --git a/ext/boto/datapipeline/exceptions.py b/ext/boto/datapipeline/exceptions.py new file mode 100644 index 0000000000..c2761e25c1 --- /dev/null +++ b/ext/boto/datapipeline/exceptions.py @@ -0,0 +1,42 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class PipelineDeletedException(JSONResponseError): + pass + + +class InvalidRequestException(JSONResponseError): + pass + + +class TaskNotFoundException(JSONResponseError): + pass + + +class PipelineNotFoundException(JSONResponseError): + pass + + +class InternalServiceError(JSONResponseError): + pass diff --git a/ext/boto/datapipeline/layer1.py b/ext/boto/datapipeline/layer1.py new file mode 100644 index 0000000000..028fd9d2e3 --- /dev/null +++ b/ext/boto/datapipeline/layer1.py @@ -0,0 +1,639 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.datapipeline import exceptions + + +class DataPipelineConnection(AWSQueryConnection): + """ + This is the AWS Data Pipeline API Reference . This guide provides + descriptions and samples of the AWS Data Pipeline API. + + AWS Data Pipeline is a web service that configures and manages a + data-driven workflow called a pipeline. AWS Data Pipeline handles + the details of scheduling and ensuring that data dependencies are + met so your application can focus on processing the data. + + The AWS Data Pipeline API implements two main sets of + functionality. The first set of actions configure the pipeline in + the web service. You call these actions to create a pipeline and + define data sources, schedules, dependencies, and the transforms + to be performed on the data. + + The second set of actions are used by a task runner application + that calls the AWS Data Pipeline API to receive the next task + ready for processing. The logic for performing the task, such as + querying the data, running data analysis, or converting the data + from one format to another, is contained within the task runner. + The task runner performs the task assigned to it by the web + service, reporting progress to the web service as it does so. When + the task is done, the task runner reports the final success or + failure of the task to the web service. + + AWS Data Pipeline provides an open-source implementation of a task + runner called AWS Data Pipeline Task Runner. AWS Data Pipeline + Task Runner provides logic for common data management scenarios, + such as performing database queries and running data analysis + using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data + Pipeline Task Runner as your task runner, or you can write your + own task runner to provide custom data management. + + The AWS Data Pipeline API uses the Signature Version 4 protocol + for signing requests. For more information about how to sign a + request with this protocol, see `Signature Version 4 Signing + Process`_. In the code examples in this reference, the Signature + Version 4 Request parameters are represented as AuthParams. 
+ """ + APIVersion = "2012-10-29" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com" + ServiceName = "DataPipeline" + TargetPrefix = "DataPipeline" + ResponseError = JSONResponseError + + _faults = { + "PipelineDeletedException": exceptions.PipelineDeletedException, + "InvalidRequestException": exceptions.InvalidRequestException, + "TaskNotFoundException": exceptions.TaskNotFoundException, + "PipelineNotFoundException": exceptions.PipelineNotFoundException, + "InternalServiceError": exceptions.InternalServiceError, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + kwargs['host'] = region.endpoint + super(DataPipelineConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def activate_pipeline(self, pipeline_id): + """ + Validates a pipeline and initiates processing. If the pipeline + does not pass validation, activation fails. + + Call this action to start processing pipeline tasks of a + pipeline you've created using the CreatePipeline and + PutPipelineDefinition actions. A pipeline cannot be modified + after it has been successfully activated. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to activate. + + """ + params = {'pipelineId': pipeline_id, } + return self.make_request(action='ActivatePipeline', + body=json.dumps(params)) + + def create_pipeline(self, name, unique_id, description=None): + """ + Creates a new empty pipeline. When this action succeeds, you + can then use the PutPipelineDefinition action to populate the + pipeline. + + :type name: string + :param name: The name of the new pipeline. You can use the same name + for multiple pipelines associated with your AWS account, because + AWS Data Pipeline assigns each new pipeline a unique pipeline + identifier. + + :type unique_id: string + :param unique_id: A unique identifier that you specify. This identifier + is not the same as the pipeline identifier assigned by AWS Data + Pipeline. You are responsible for defining the format and ensuring + the uniqueness of this identifier. You use this parameter to ensure + idempotency during repeated calls to CreatePipeline. For example, + if the first call to CreatePipeline does not return a clear + success, you can pass in the same unique identifier and pipeline + name combination on a subsequent call to CreatePipeline. + CreatePipeline ensures that if a pipeline already exists with the + same name and unique identifier, a new pipeline will not be + created. Instead, you'll receive the pipeline identifier from the + previous attempt. The uniqueness of the name and unique identifier + combination is scoped to the AWS account or IAM user credentials. + + :type description: string + :param description: The description of the new pipeline. + + """ + params = {'name': name, 'uniqueId': unique_id, } + if description is not None: + params['description'] = description + return self.make_request(action='CreatePipeline', + body=json.dumps(params)) + + def delete_pipeline(self, pipeline_id): + """ + Permanently deletes a pipeline, its pipeline definition and + its run history. You cannot query or restore a deleted + pipeline. AWS Data Pipeline will attempt to cancel instances + associated with the pipeline that are currently being + processed by task runners. Deleting a pipeline cannot be + undone. 
+ + To temporarily pause a pipeline instead of deleting it, call + SetStatus with the status set to Pause on individual + components. Components that are paused by SetStatus can be + resumed. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to be deleted. + + """ + params = {'pipelineId': pipeline_id, } + return self.make_request(action='DeletePipeline', + body=json.dumps(params)) + + def describe_objects(self, object_ids, pipeline_id, marker=None, + evaluate_expressions=None): + """ + Returns the object definitions for a set of objects associated + with the pipeline. Object definitions are composed of a set of + fields that define the properties of the object. + + :type pipeline_id: string + :param pipeline_id: Identifier of the pipeline that contains the object + definitions. + + :type object_ids: list + :param object_ids: Identifiers of the pipeline objects that contain the + definitions to be described. You can pass as many as 25 identifiers + in a single call to DescribeObjects. + + :type evaluate_expressions: boolean + :param evaluate_expressions: Indicates whether any expressions in the + object should be evaluated when the object descriptions are + returned. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call DescribeObjects, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + DescribeObjects again and pass the marker value from the response + to retrieve the next set of results. + + """ + params = { + 'pipelineId': pipeline_id, + 'objectIds': object_ids, + } + if evaluate_expressions is not None: + params['evaluateExpressions'] = evaluate_expressions + if marker is not None: + params['marker'] = marker + return self.make_request(action='DescribeObjects', + body=json.dumps(params)) + + def describe_pipelines(self, pipeline_ids): + """ + Retrieve metadata about one or more pipelines. The information + retrieved includes the name of the pipeline, the pipeline + identifier, its current state, and the user account that owns + the pipeline. Using account credentials, you can retrieve + metadata about pipelines that you or your IAM users have + created. If you are using an IAM user account, you can + retrieve metadata about only those pipelines you have read + permission for. + + To retrieve the full pipeline definition instead of metadata + about the pipeline, call the GetPipelineDefinition action. + + :type pipeline_ids: list + :param pipeline_ids: Identifiers of the pipelines to describe. You can + pass as many as 25 identifiers in a single call to + DescribePipelines. You can obtain pipeline identifiers by calling + ListPipelines. + + """ + params = {'pipelineIds': pipeline_ids, } + return self.make_request(action='DescribePipelines', + body=json.dumps(params)) + + def evaluate_expression(self, pipeline_id, expression, object_id): + """ + Evaluates a string in the context of a specified object. A + task runner can use this action to evaluate SQL queries stored + in Amazon S3. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline. + + :type object_id: string + :param object_id: The identifier of the object. + + :type expression: string + :param expression: The expression to evaluate. 
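+
+        For example, a sketch of a call (the identifiers and the
+        expression are illustrative only)::
+
+            conn.evaluate_expression('df-00627471SOVYZEXAMPLE',
+                                     '#{node.@scheduledStartTime}',
+                                     'Schedule')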
+ + """ + params = { + 'pipelineId': pipeline_id, + 'objectId': object_id, + 'expression': expression, + } + return self.make_request(action='EvaluateExpression', + body=json.dumps(params)) + + def get_pipeline_definition(self, pipeline_id, version=None): + """ + Returns the definition of the specified pipeline. You can call + GetPipelineDefinition to retrieve the pipeline definition you + provided using PutPipelineDefinition. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline. + + :type version: string + :param version: The version of the pipeline definition to retrieve. + This parameter accepts the values `latest` (default) and `active`. + Where `latest` indicates the last definition saved to the pipeline + and `active` indicates the last definition of the pipeline that was + activated. + + """ + params = {'pipelineId': pipeline_id, } + if version is not None: + params['version'] = version + return self.make_request(action='GetPipelineDefinition', + body=json.dumps(params)) + + def list_pipelines(self, marker=None): + """ + Returns a list of pipeline identifiers for all active + pipelines. Identifiers are returned only for pipelines you + have permission to access. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call ListPipelines, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + ListPipelines again and pass the marker value from the response to + retrieve the next set of results. + + """ + params = {} + if marker is not None: + params['marker'] = marker + return self.make_request(action='ListPipelines', + body=json.dumps(params)) + + def poll_for_task(self, worker_group, hostname=None, + instance_identity=None): + """ + Task runners call this action to receive a task to perform + from AWS Data Pipeline. The task runner specifies which tasks + it can perform by setting a value for the workerGroup + parameter of the PollForTask call. The task returned by + PollForTask may come from any of the pipelines that match the + workerGroup value passed in by the task runner and that was + launched using the IAM user credentials specified by the task + runner. + + If tasks are ready in the work queue, PollForTask returns a + response immediately. If no tasks are available in the queue, + PollForTask uses long-polling and holds on to a poll + connection for up to a 90 seconds during which time the first + newly scheduled task is handed to the task runner. To + accomodate this, set the socket timeout in your task runner to + 90 seconds. The task runner should not call PollForTask again + on the same `workerGroup` until it receives a response, and + this may take up to 90 seconds. + + :type worker_group: string + :param worker_group: Indicates the type of task the task runner is + configured to accept and process. The worker group is set as a + field on objects in the pipeline when they are created. You can + only specify a single value for `workerGroup` in the call to + PollForTask. There are no wildcard values permitted in + `workerGroup`, the string must be an exact, case-sensitive, match. + + :type hostname: string + :param hostname: The public DNS name of the calling task runner. + + :type instance_identity: dict + :param instance_identity: Identity information for the Amazon EC2 + instance that is hosting the task runner. You can get this value by + calling the URI, `http://169.254.169.254/latest/meta-data/instance- + id`, from the EC2 instance. 
For more information, go to `Instance + Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing + in this value proves that your task runner is running on an EC2 + instance, and ensures the proper AWS Data Pipeline service charges + are applied to your pipeline. + + """ + params = {'workerGroup': worker_group, } + if hostname is not None: + params['hostname'] = hostname + if instance_identity is not None: + params['instanceIdentity'] = instance_identity + return self.make_request(action='PollForTask', + body=json.dumps(params)) + + def put_pipeline_definition(self, pipeline_objects, pipeline_id): + """ + Adds tasks, schedules, and preconditions that control the + behavior of the pipeline. You can use PutPipelineDefinition to + populate a new pipeline or to update an existing pipeline that + has not yet been activated. + + PutPipelineDefinition also validates the configuration as it + adds it to the pipeline. Changes to the pipeline are saved + unless one of the following three validation errors exists in + the pipeline. + + #. An object is missing a name or identifier field. + #. A string or reference field is empty. + #. The number of objects in the pipeline exceeds the maximum + allowed objects. + + + + Pipeline object definitions are passed to the + PutPipelineDefinition action and returned by the + GetPipelineDefinition action. + + :type pipeline_id: string + :param pipeline_id: The identifier of the pipeline to be configured. + + :type pipeline_objects: list + :param pipeline_objects: The objects that define the pipeline. These + will overwrite the existing pipeline definition. + + """ + params = { + 'pipelineId': pipeline_id, + 'pipelineObjects': pipeline_objects, + } + return self.make_request(action='PutPipelineDefinition', + body=json.dumps(params)) + + def query_objects(self, pipeline_id, sphere, marker=None, query=None, + limit=None): + """ + Queries a pipeline for the names of objects that match a + specified set of conditions. + + The objects returned by QueryObjects are paginated and then + filtered by the value you set for query. This means the action + may return an empty result set with a value set for marker. If + `HasMoreResults` is set to `True`, you should continue to call + QueryObjects, passing in the returned value for marker, until + `HasMoreResults` returns `False`. + + :type pipeline_id: string + :param pipeline_id: Identifier of the pipeline to be queried for object + names. + + :type query: dict + :param query: Query that defines the objects to be returned. The Query + object can contain a maximum of ten selectors. The conditions in + the query are limited to top-level String fields in the object. + These filters can be applied to components, instances, and + attempts. + + :type sphere: string + :param sphere: Specifies whether the query applies to components or + instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`. + + :type marker: string + :param marker: The starting point for the results to be returned. The + first time you call QueryObjects, this value should be empty. As + long as the action returns `HasMoreResults` as `True`, you can call + QueryObjects again and pass the marker value from the response to + retrieve the next set of results. + + :type limit: integer + :param limit: Specifies the maximum number of object names that + QueryObjects will return in a single call. The default value is + 100. 
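+
+        A pagination sketch following the marker/`HasMoreResults` protocol
+        described above (the `ids`, `hasMoreResults` and `marker` response
+        field names follow the service's JSON format and are shown here as
+        an assumption)::
+
+            marker = None
+            while True:
+                result = conn.query_objects(pipeline_id, 'INSTANCE',
+                                            marker=marker)
+                for object_id in result.get('ids', []):
+                    print(object_id)
+                if not result.get('hasMoreResults'):
+                    break
+                marker = result['marker']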
+ + """ + params = {'pipelineId': pipeline_id, 'sphere': sphere, } + if query is not None: + params['query'] = query + if marker is not None: + params['marker'] = marker + if limit is not None: + params['limit'] = limit + return self.make_request(action='QueryObjects', + body=json.dumps(params)) + + def report_task_progress(self, task_id): + """ + Updates the AWS Data Pipeline service on the progress of the + calling task runner. When the task runner is assigned a task, + it should call ReportTaskProgress to acknowledge that it has + the task within 2 minutes. If the web service does not recieve + this acknowledgement within the 2 minute window, it will + assign the task in a subsequent PollForTask call. After this + initial acknowledgement, the task runner only needs to report + progress every 15 minutes to maintain its ownership of the + task. You can change this reporting time from 15 minutes by + specifying a `reportProgressTimeout` field in your pipeline. + If a task runner does not report its status after 5 minutes, + AWS Data Pipeline will assume that the task runner is unable + to process the task and will reassign the task in a subsequent + response to PollForTask. task runners should call + ReportTaskProgress every 60 seconds. + + :type task_id: string + :param task_id: Identifier of the task assigned to the task runner. + This value is provided in the TaskObject that the service returns + with the response for the PollForTask action. + + """ + params = {'taskId': task_id, } + return self.make_request(action='ReportTaskProgress', + body=json.dumps(params)) + + def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None, + hostname=None): + """ + Task runners call ReportTaskRunnerHeartbeat every 15 minutes + to indicate that they are operational. In the case of AWS Data + Pipeline Task Runner launched on a resource managed by AWS + Data Pipeline, the web service can use this call to detect + when the task runner application has failed and restart a new + instance. + + :type taskrunner_id: string + :param taskrunner_id: The identifier of the task runner. This value + should be unique across your AWS account. In the case of AWS Data + Pipeline Task Runner launched on a resource managed by AWS Data + Pipeline, the web service provides a unique identifier when it + launches the application. If you have written a custom task runner, + you should assign a unique identifier for the task runner. + + :type worker_group: string + :param worker_group: Indicates the type of task the task runner is + configured to accept and process. The worker group is set as a + field on objects in the pipeline when they are created. You can + only specify a single value for `workerGroup` in the call to + ReportTaskRunnerHeartbeat. There are no wildcard values permitted + in `workerGroup`, the string must be an exact, case-sensitive, + match. + + :type hostname: string + :param hostname: The public DNS name of the calling task runner. + + """ + params = {'taskrunnerId': taskrunner_id, } + if worker_group is not None: + params['workerGroup'] = worker_group + if hostname is not None: + params['hostname'] = hostname + return self.make_request(action='ReportTaskRunnerHeartbeat', + body=json.dumps(params)) + + def set_status(self, object_ids, status, pipeline_id): + """ + Requests that the status of an array of physical or logical + pipeline objects be updated in the pipeline. This update may + not occur immediately, but is eventually consistent. 
+        status that can be set depends on the type of object.
+
+        :type pipeline_id: string
+        :param pipeline_id: Identifies the pipeline that contains the objects.
+
+        :type object_ids: list
+        :param object_ids: Identifies an array of objects. The corresponding
+            objects can be either physical or components, but not a mix of both
+            types.
+
+        :type status: string
+        :param status: Specifies the status to be set on all the objects in
+            `objectIds`. For components, this can be either `PAUSE` or
+            `RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or
+            `MARK_FINISHED`.
+
+        """
+        params = {
+            'pipelineId': pipeline_id,
+            'objectIds': object_ids,
+            'status': status,
+        }
+        return self.make_request(action='SetStatus',
+                                 body=json.dumps(params))
+
+    def set_task_status(self, task_id, task_status, error_id=None,
+                        error_message=None, error_stack_trace=None):
+        """
+        Notifies AWS Data Pipeline that a task is completed and
+        provides information about the final status. The task runner
+        calls this action regardless of whether the task was
+        successful. The task runner does not need to call SetTaskStatus
+        for tasks that are canceled by the web service during a call
+        to ReportTaskProgress.
+
+        :type task_id: string
+        :param task_id: Identifies the task assigned to the task runner. This
+            value is set in the TaskObject that is returned by the PollForTask
+            action.
+
+        :type task_status: string
+        :param task_status: If `FINISHED`, the task successfully completed. If
+            `FAILED` the task ended unsuccessfully. The `FALSE` value is used
+            by preconditions.
+
+        :type error_id: string
+        :param error_id: If an error occurred during the task, this value
+            specifies an id value that represents the error. This value is set
+            on the physical attempt object. It is used to display error
+            information to the user. It should not start with the string
+            "Service_", which is reserved by the system.
+
+        :type error_message: string
+        :param error_message: If an error occurred during the task, this value
+            specifies a text description of the error. This value is set on the
+            physical attempt object. It is used to display error information to
+            the user. The web service does not parse this value.
+
+        :type error_stack_trace: string
+        :param error_stack_trace: If an error occurred during the task, this
+            value specifies the stack trace associated with the error. This
+            value is set on the physical attempt object. It is used to display
+            error information to the user. The web service does not parse this
+            value.
+
+        """
+        params = {'taskId': task_id, 'taskStatus': task_status, }
+        if error_id is not None:
+            params['errorId'] = error_id
+        if error_message is not None:
+            params['errorMessage'] = error_message
+        if error_stack_trace is not None:
+            params['errorStackTrace'] = error_stack_trace
+        return self.make_request(action='SetTaskStatus',
+                                 body=json.dumps(params))
+
+    def validate_pipeline_definition(self, pipeline_objects, pipeline_id):
+        """
+        Tests the pipeline definition with a set of validation checks
+        to ensure that it is well formed and can run without error.
+
+        :type pipeline_id: string
+        :param pipeline_id: Identifies the pipeline whose definition is to be
+            validated.
+
+        :type pipeline_objects: list
+        :param pipeline_objects: A list of objects that define the pipeline
+            changes to validate against the pipeline.
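+
+        For example (the object and field names are illustrative; the
+        layout mirrors the key/stringValue JSON format used by the
+        service)::
+
+            objects = [{'id': 'Default',
+                        'name': 'Default',
+                        'fields': [{'key': 'workerGroup',
+                                    'stringValue': 'MyWorkerGroup'}]}]
+            conn.validate_pipeline_definition(objects, pipeline_id)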
+ + """ + params = { + 'pipelineId': pipeline_id, + 'pipelineObjects': pipeline_objects, + } + return self.make_request(action='ValidatePipelineDefinition', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/directconnect/__init__.py b/ext/boto/directconnect/__init__.py new file mode 100644 index 0000000000..177348c2d2 --- /dev/null +++ b/ext/boto/directconnect/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS DirectConnect service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.directconnect.layer1 import DirectConnectConnection + return get_regions('directconnect', connection_cls=DirectConnectConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.directconnect.layer1 import DirectConnectConnection + return connect('directconnect', region_name, + connection_cls=DirectConnectConnection, **kw_params) diff --git a/ext/boto/directconnect/exceptions.py b/ext/boto/directconnect/exceptions.py new file mode 100644 index 0000000000..88168d302c --- /dev/null +++ b/ext/boto/directconnect/exceptions.py @@ -0,0 +1,29 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class DirectConnectClientException(Exception): + pass + + +class DirectConnectServerException(Exception): + pass diff --git a/ext/boto/directconnect/layer1.py b/ext/boto/directconnect/layer1.py new file mode 100644 index 0000000000..a332b31bdd --- /dev/null +++ b/ext/boto/directconnect/layer1.py @@ -0,0 +1,627 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.directconnect import exceptions +from boto.compat import json + + +class DirectConnectConnection(AWSQueryConnection): + """ + AWS Direct Connect makes it easy to establish a dedicated network + connection from your premises to Amazon Web Services (AWS). Using + AWS Direct Connect, you can establish private connectivity between + AWS and your data center, office, or colocation environment, which + in many cases can reduce your network costs, increase bandwidth + throughput, and provide a more consistent network experience than + Internet-based connections. + + The AWS Direct Connect API Reference provides descriptions, + syntax, and usage examples for each of the actions and data types + for AWS Direct Connect. 
Use the following links to get started + using the AWS Direct Connect API Reference : + + + + `Actions`_: An alphabetical list of all AWS Direct Connect + actions. + + `Data Types`_: An alphabetical list of all AWS Direct Connect + data types. + + `Common Query Parameters`_: Parameters that all Query actions + can use. + + `Common Errors`_: Client and server errors that all actions can + return. + """ + APIVersion = "2012-10-25" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com" + ServiceName = "DirectConnect" + TargetPrefix = "OvertureService" + ResponseError = JSONResponseError + + _faults = { + "DirectConnectClientException": exceptions.DirectConnectClientException, + "DirectConnectServerException": exceptions.DirectConnectServerException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(DirectConnectConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def allocate_connection_on_interconnect(self, bandwidth, connection_name, + owner_account, interconnect_id, + vlan): + """ + Creates a hosted connection on an interconnect. + + Allocates a VLAN number and a specified amount of bandwidth + for use by a hosted connection on the given interconnect. + + :type bandwidth: string + :param bandwidth: Bandwidth of the connection. + Example: " 500Mbps " + + Default: None + + :type connection_name: string + :param connection_name: Name of the provisioned connection. + Example: " 500M Connection to AWS " + + Default: None + + :type owner_account: string + :param owner_account: Numeric account Id of the customer for whom the + connection will be provisioned. + Example: 123443215678 + + Default: None + + :type interconnect_id: string + :param interconnect_id: ID of the interconnect on which the connection + will be provisioned. + Example: dxcon-456abc78 + + Default: None + + :type vlan: integer + :param vlan: The dedicated VLAN provisioned to the connection. + Example: 101 + + Default: None + + """ + params = { + 'bandwidth': bandwidth, + 'connectionName': connection_name, + 'ownerAccount': owner_account, + 'interconnectId': interconnect_id, + 'vlan': vlan, + } + return self.make_request(action='AllocateConnectionOnInterconnect', + body=json.dumps(params)) + + def allocate_private_virtual_interface(self, connection_id, + owner_account, + new_private_virtual_interface_allocation): + """ + Provisions a private virtual interface to be owned by a + different customer. + + The owner of a connection calls this function to provision a + private virtual interface which will be owned by another AWS + customer. + + Virtual interfaces created using this function must be + confirmed by the virtual interface owner by calling + ConfirmPrivateVirtualInterface. Until this step has been + completed, the virtual interface will be in 'Confirming' + state, and will not be available for handling traffic. + + :type connection_id: string + :param connection_id: The connection ID on which the private virtual + interface is provisioned. + Default: None + + :type owner_account: string + :param owner_account: The AWS account that will own the new private + virtual interface. 
+ Default: None + + :type new_private_virtual_interface_allocation: dict + :param new_private_virtual_interface_allocation: Detailed information + for the private virtual interface to be provisioned. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'ownerAccount': owner_account, + 'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation, + } + return self.make_request(action='AllocatePrivateVirtualInterface', + body=json.dumps(params)) + + def allocate_public_virtual_interface(self, connection_id, owner_account, + new_public_virtual_interface_allocation): + """ + Provisions a public virtual interface to be owned by a + different customer. + + The owner of a connection calls this function to provision a + public virtual interface which will be owned by another AWS + customer. + + Virtual interfaces created using this function must be + confirmed by the virtual interface owner by calling + ConfirmPublicVirtualInterface. Until this step has been + completed, the virtual interface will be in 'Confirming' + state, and will not be available for handling traffic. + + :type connection_id: string + :param connection_id: The connection ID on which the public virtual + interface is provisioned. + Default: None + + :type owner_account: string + :param owner_account: The AWS account that will own the new public + virtual interface. + Default: None + + :type new_public_virtual_interface_allocation: dict + :param new_public_virtual_interface_allocation: Detailed information + for the public virtual interface to be provisioned. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'ownerAccount': owner_account, + 'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation, + } + return self.make_request(action='AllocatePublicVirtualInterface', + body=json.dumps(params)) + + def confirm_connection(self, connection_id): + """ + Confirm the creation of a hosted connection on an + interconnect. + + Upon creation, the hosted connection is initially in the + 'Ordering' state, and will remain in this state until the + owner calls ConfirmConnection to confirm creation of the + hosted connection. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + """ + params = {'connectionId': connection_id, } + return self.make_request(action='ConfirmConnection', + body=json.dumps(params)) + + def confirm_private_virtual_interface(self, virtual_interface_id, + virtual_gateway_id): + """ + Accept ownership of a private virtual interface created by + another customer. + + After the virtual interface owner calls this function, the + virtual interface will be created and attached to the given + virtual private gateway, and will be available for handling + traffic. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + :type virtual_gateway_id: string + :param virtual_gateway_id: ID of the virtual private gateway that will + be attached to the virtual interface. + A virtual private gateway can be managed via the Amazon Virtual Private + Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action. 
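Taken together, the allocate*/confirm* pairs implement the handoff of a hosted connection from a Direct Connect partner to an end customer. A minimal sketch of both sides of that flow, assuming credentials come from the usual boto config/environment and reusing the example IDs from the docstrings (the vgw-... gateway ID is hypothetical):

from boto.directconnect.layer1 import DirectConnectConnection

# Partner account: provision a hosted connection on an interconnect.
partner = DirectConnectConnection()  # region defaults to us-east-1
partner.allocate_connection_on_interconnect(
    bandwidth='500Mbps',
    connection_name='500M Connection to AWS',
    owner_account='123443215678',
    interconnect_id='dxcon-456abc78',
    vlan=101)

# Customer account: accept the hosted connection, then accept a private
# virtual interface and attach it to a virtual private gateway.
customer = DirectConnectConnection()
customer.confirm_connection(connection_id='dxcon-fg5678gh')
customer.confirm_private_virtual_interface(
    virtual_interface_id='dxvif-123dfg56',
    virtual_gateway_id='vgw-1a2b3c4d')  # hypothetical gateway ID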
+ + Default: None + + """ + params = { + 'virtualInterfaceId': virtual_interface_id, + 'virtualGatewayId': virtual_gateway_id, + } + return self.make_request(action='ConfirmPrivateVirtualInterface', + body=json.dumps(params)) + + def confirm_public_virtual_interface(self, virtual_interface_id): + """ + Accept ownership of a public virtual interface created by + another customer. + + After the virtual interface owner calls this function, the + specified virtual interface will be created and made available + for handling traffic. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. + Example: dxvif-123dfg56 + + Default: None + + """ + params = {'virtualInterfaceId': virtual_interface_id, } + return self.make_request(action='ConfirmPublicVirtualInterface', + body=json.dumps(params)) + + def create_connection(self, location, bandwidth, connection_name): + """ + Creates a new connection between the customer network and a + specific AWS Direct Connect location. + + A connection links your internal network to an AWS Direct + Connect location over a standard 1 gigabit or 10 gigabit + Ethernet fiber-optic cable. One end of the cable is connected + to your router, the other to an AWS Direct Connect router. An + AWS Direct Connect location provides access to Amazon Web + Services in the region it is associated with. You can + establish connections with AWS Direct Connect locations in + multiple regions, but a connection in one region does not + provide connectivity to other regions. + + :type location: string + :param location: Where the connection is located. + Example: EqSV5 + + Default: None + + :type bandwidth: string + :param bandwidth: Bandwidth of the connection. + Example: 1Gbps + + Default: None + + :type connection_name: string + :param connection_name: The name of the connection. + Example: " My Connection to AWS " + + Default: None + + """ + params = { + 'location': location, + 'bandwidth': bandwidth, + 'connectionName': connection_name, + } + return self.make_request(action='CreateConnection', + body=json.dumps(params)) + + def create_interconnect(self, interconnect_name, bandwidth, location): + """ + Creates a new interconnect between a AWS Direct Connect + partner's network and a specific AWS Direct Connect location. + + An interconnect is a connection which is capable of hosting + other connections. The AWS Direct Connect partner can use an + interconnect to provide sub-1Gbps AWS Direct Connect service + to tier 2 customers who do not have their own connections. + Like a standard connection, an interconnect links the AWS + Direct Connect partner's network to an AWS Direct Connect + location over a standard 1 Gbps or 10 Gbps Ethernet fiber- + optic cable. One end is connected to the partner's router, the + other to an AWS Direct Connect router. + + For each end customer, the AWS Direct Connect partner + provisions a connection on their interconnect by calling + AllocateConnectionOnInterconnect. The end customer can then + connect to AWS resources by creating a virtual interface on + their connection, using the VLAN assigned to them by the AWS + Direct Connect partner. + + :type interconnect_name: string + :param interconnect_name: The name of the interconnect. 
+ Example: " 1G Interconnect to AWS " + + Default: None + + :type bandwidth: string + :param bandwidth: The port bandwidth + Example: 1Gbps + + Default: None + + Available values: 1Gbps,10Gbps + + :type location: string + :param location: Where the interconnect is located + Example: EqSV5 + + Default: None + + """ + params = { + 'interconnectName': interconnect_name, + 'bandwidth': bandwidth, + 'location': location, + } + return self.make_request(action='CreateInterconnect', + body=json.dumps(params)) + + def create_private_virtual_interface(self, connection_id, + new_private_virtual_interface): + """ + Creates a new private virtual interface. A virtual interface + is the VLAN that transports AWS Direct Connect traffic. A + private virtual interface supports sending traffic to a single + virtual private cloud (VPC). + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type new_private_virtual_interface: dict + :param new_private_virtual_interface: Detailed information for the + private virtual interface to be created. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'newPrivateVirtualInterface': new_private_virtual_interface, + } + return self.make_request(action='CreatePrivateVirtualInterface', + body=json.dumps(params)) + + def create_public_virtual_interface(self, connection_id, + new_public_virtual_interface): + """ + Creates a new public virtual interface. A virtual interface is + the VLAN that transports AWS Direct Connect traffic. A public + virtual interface supports sending traffic to public services + of AWS such as Amazon Simple Storage Service (Amazon S3). + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type new_public_virtual_interface: dict + :param new_public_virtual_interface: Detailed information for the + public virtual interface to be created. + Default: None + + """ + params = { + 'connectionId': connection_id, + 'newPublicVirtualInterface': new_public_virtual_interface, + } + return self.make_request(action='CreatePublicVirtualInterface', + body=json.dumps(params)) + + def delete_connection(self, connection_id): + """ + Deletes the connection. + + Deleting a connection only stops the AWS Direct Connect port + hour and data transfer charges. You need to cancel separately + with the providers any services or charges for cross-connects + or network circuits that connect you to the AWS Direct Connect + location. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + """ + params = {'connectionId': connection_id, } + return self.make_request(action='DeleteConnection', + body=json.dumps(params)) + + def delete_interconnect(self, interconnect_id): + """ + Deletes the specified interconnect. + + :type interconnect_id: string + :param interconnect_id: The ID of the interconnect. + Example: dxcon-abc123 + + """ + params = {'interconnectId': interconnect_id, } + return self.make_request(action='DeleteInterconnect', + body=json.dumps(params)) + + def delete_virtual_interface(self, virtual_interface_id): + """ + Deletes a virtual interface. + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. 
+ Example: dxvif-123dfg56 + + Default: None + + """ + params = {'virtualInterfaceId': virtual_interface_id, } + return self.make_request(action='DeleteVirtualInterface', + body=json.dumps(params)) + + def describe_connections(self, connection_id=None): + """ + Displays all connections in this region. + + If a connection ID is provided, the call returns only that + particular connection. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + """ + params = {} + if connection_id is not None: + params['connectionId'] = connection_id + return self.make_request(action='DescribeConnections', + body=json.dumps(params)) + + def describe_connections_on_interconnect(self, interconnect_id): + """ + Return a list of connections that have been provisioned on the + given interconnect. + + :type interconnect_id: string + :param interconnect_id: ID of the interconnect on which a list of + connection is provisioned. + Example: dxcon-abc123 + + Default: None + + """ + params = {'interconnectId': interconnect_id, } + return self.make_request(action='DescribeConnectionsOnInterconnect', + body=json.dumps(params)) + + def describe_interconnects(self, interconnect_id=None): + """ + Returns a list of interconnects owned by the AWS account. + + If an interconnect ID is provided, it will only return this + particular interconnect. + + :type interconnect_id: string + :param interconnect_id: The ID of the interconnect. + Example: dxcon-abc123 + + """ + params = {} + if interconnect_id is not None: + params['interconnectId'] = interconnect_id + return self.make_request(action='DescribeInterconnects', + body=json.dumps(params)) + + def describe_locations(self): + """ + Returns the list of AWS Direct Connect locations in the + current AWS region. These are the locations that may be + selected when calling CreateConnection or CreateInterconnect. + """ + params = {} + return self.make_request(action='DescribeLocations', + body=json.dumps(params)) + + def describe_virtual_gateways(self): + """ + Returns a list of virtual private gateways owned by the AWS + account. + + You can create one or more AWS Direct Connect private virtual + interfaces linking to a virtual private gateway. A virtual + private gateway can be managed via Amazon Virtual Private + Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action. + """ + params = {} + return self.make_request(action='DescribeVirtualGateways', + body=json.dumps(params)) + + def describe_virtual_interfaces(self, connection_id=None, + virtual_interface_id=None): + """ + Displays all virtual interfaces for an AWS account. Virtual + interfaces deleted fewer than 15 minutes before + DescribeVirtualInterfaces is called are also returned. If a + connection ID is included then only virtual interfaces + associated with this connection will be returned. If a virtual + interface ID is included then only a single virtual interface + will be returned. + + A virtual interface (VLAN) transmits the traffic between the + AWS Direct Connect location and the customer. + + If a connection ID is provided, only virtual interfaces + provisioned on the specified connection will be returned. If a + virtual interface ID is provided, only this particular virtual + interface will be returned. + + :type connection_id: string + :param connection_id: ID of the connection. + Example: dxcon-fg5678gh + + Default: None + + :type virtual_interface_id: string + :param virtual_interface_id: ID of the virtual interface. 
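Each describe_* method returns the service's JSON response decoded into Python structures. A small polling sketch; the 'connections'/'connectionState' field names follow the AWS Direct Connect API reference rather than anything defined in this module, so treat them as an assumption:

import time

conn = DirectConnectConnection()
# Wait for a newly created connection to leave its transitional states.
while True:
    result = conn.describe_connections(connection_id='dxcon-fg5678gh')
    state = result['connections'][0]['connectionState']
    if state == 'available':
        break
    time.sleep(30)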
+ Example: dxvif-123dfg56 + + Default: None + + """ + params = {} + if connection_id is not None: + params['connectionId'] = connection_id + if virtual_interface_id is not None: + params['virtualInterfaceId'] = virtual_interface_id + return self.make_request(action='DescribeVirtualInterfaces', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/dynamodb/__init__.py b/ext/boto/dynamodb/__init__.py new file mode 100644 index 0000000000..078bfef647 --- /dev/null +++ b/ext/boto/dynamodb/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon DynamoDB service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.dynamodb.layer2 import Layer2 + return get_regions('dynamodb', connection_cls=Layer2) + + +def connect_to_region(region_name, **kw_params): + from boto.dynamodb.layer2 import Layer2 + return connect('dynamodb', region_name, connection_cls=Layer2, **kw_params) diff --git a/ext/boto/dynamodb/batch.py b/ext/boto/dynamodb/batch.py new file mode 100644 index 0000000000..f30b8425c6 --- /dev/null +++ b/ext/boto/dynamodb/batch.py @@ -0,0 +1,261 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import six + + +class Batch(object): + """ + Used to construct a BatchGet request. + + :ivar table: The Table object from which the item is retrieved. + + :ivar keys: A list of scalar or tuple values. Each element in the + list represents one Item to retrieve. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. NOTE: The maximum number of items that + can be retrieved for a single operation is 100. Also, the + number of items retrieved is constrained by a 1 MB size limit. + + :ivar attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + + :ivar consistent_read: Specify whether or not to use a + consistent read. Defaults to False. + + """ + + def __init__(self, table, keys, attributes_to_get=None, + consistent_read=False): + self.table = table + self.keys = keys + self.attributes_to_get = attributes_to_get + self.consistent_read = consistent_read + + def to_dict(self): + """ + Convert the Batch object into the format required for Layer1. + """ + batch_dict = {} + key_list = [] + for key in self.keys: + if isinstance(key, tuple): + hash_key, range_key = key + else: + hash_key = key + range_key = None + k = self.table.layer2.build_key_from_values(self.table.schema, + hash_key, range_key) + key_list.append(k) + batch_dict['Keys'] = key_list + if self.attributes_to_get: + batch_dict['AttributesToGet'] = self.attributes_to_get + if self.consistent_read: + batch_dict['ConsistentRead'] = True + else: + batch_dict['ConsistentRead'] = False + return batch_dict + + +class BatchWrite(object): + """ + Used to construct a BatchWrite request. Each BatchWrite object + represents a collection of PutItem and DeleteItem requests for + a single Table. + + :ivar table: The Table object from which the item is retrieved. + + :ivar puts: A list of :class:`boto.dynamodb.item.Item` objects + that you want to write to DynamoDB. + + :ivar deletes: A list of scalar or tuple values. Each element in the + list represents one Item to delete. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). 
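A short illustration of the key convention described above, assuming a hypothetical users_table whose schema has both a HashKey and a RangeKey (in normal use a Batch is built via BatchList.add_batch rather than constructed directly):

from boto.dynamodb.batch import Batch

# Tuples for a hash+range table; bare scalars for a hash-only table.
batch = Batch(table=users_table,
              keys=[('user-1', 1), ('user-1', 2), ('user-2', 7)],
              attributes_to_get=['status'],
              consistent_read=True)
request_items = {users_table.name: batch.to_dict()}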
If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. + """ + + def __init__(self, table, puts=None, deletes=None): + self.table = table + self.puts = puts or [] + self.deletes = deletes or [] + + def to_dict(self): + """ + Convert the Batch object into the format required for Layer1. + """ + op_list = [] + for item in self.puts: + d = {'Item': self.table.layer2.dynamize_item(item)} + d = {'PutRequest': d} + op_list.append(d) + for key in self.deletes: + if isinstance(key, tuple): + hash_key, range_key = key + else: + hash_key = key + range_key = None + k = self.table.layer2.build_key_from_values(self.table.schema, + hash_key, range_key) + d = {'Key': k} + op_list.append({'DeleteRequest': d}) + return (self.table.name, op_list) + + +class BatchList(list): + """ + A subclass of a list object that contains a collection of + :class:`boto.dynamodb.batch.Batch` objects. + """ + + def __init__(self, layer2): + list.__init__(self) + self.unprocessed = None + self.layer2 = layer2 + + def add_batch(self, table, keys, attributes_to_get=None, + consistent_read=False): + """ + Add a Batch to this BatchList. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object in which the items are contained. + + :type keys: list + :param keys: A list of scalar or tuple values. Each element in the + list represents one Item to retrieve. If the schema for the + table has both a HashKey and a RangeKey, each element in the + list should be a tuple consisting of (hash_key, range_key). If + the schema for the table contains only a HashKey, each element + in the list should be a scalar value of the appropriate type + for the table schema. NOTE: The maximum number of items that + can be retrieved for a single operation is 100. Also, the + number of items retrieved is constrained by a 1 MB size limit. + + :type attributes_to_get: list + :param attributes_to_get: A list of attribute names. + If supplied, only the specified attribute names will + be returned. Otherwise, all attributes will be returned. + """ + self.append(Batch(table, keys, attributes_to_get, consistent_read)) + + def resubmit(self): + """ + Resubmit the batch to get the next result set. The request object is + rebuild from scratch meaning that all batch added between ``submit`` + and ``resubmit`` will be lost. + + Note: This method is experimental and subject to changes in future releases + """ + del self[:] + + if not self.unprocessed: + return None + + for table_name, table_req in six.iteritems(self.unprocessed): + table_keys = table_req['Keys'] + table = self.layer2.get_table(table_name) + + keys = [] + for key in table_keys: + h = key['HashKeyElement'] + r = None + if 'RangeKeyElement' in key: + r = key['RangeKeyElement'] + keys.append((h, r)) + + attributes_to_get = None + if 'AttributesToGet' in table_req: + attributes_to_get = table_req['AttributesToGet'] + + self.add_batch(table, keys, attributes_to_get=attributes_to_get) + + return self.submit() + + def submit(self): + res = self.layer2.batch_get_item(self) + if 'UnprocessedKeys' in res: + self.unprocessed = res['UnprocessedKeys'] + return res + + def to_dict(self): + """ + Convert a BatchList object into format required for Layer1. 
+        """
+        d = {}
+        for batch in self:
+            b = batch.to_dict()
+            if b['Keys']:
+                d[batch.table.name] = b
+        return d
+
+
+class BatchWriteList(list):
+    """
+    A subclass of a list object that contains a collection of
+    :class:`boto.dynamodb.batch.BatchWrite` objects.
+    """
+
+    def __init__(self, layer2):
+        list.__init__(self)
+        self.layer2 = layer2
+
+    def add_batch(self, table, puts=None, deletes=None):
+        """
+        Add a BatchWrite to this BatchWriteList.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object in which the items are contained.
+
+        :type puts: list of :class:`boto.dynamodb.item.Item` objects
+        :param puts: A list of items that you want to write to DynamoDB.
+
+        :type deletes: list
+        :param deletes: A list of scalar or tuple values. Each element
+            in the list represents one Item to delete. If the schema
+            for the table has both a HashKey and a RangeKey, each
+            element in the list should be a tuple consisting of
+            (hash_key, range_key). If the schema for the table
+            contains only a HashKey, each element in the list should
+            be a scalar value of the appropriate type for the table
+            schema.
+        """
+        self.append(BatchWrite(table, puts, deletes))
+
+    def submit(self):
+        return self.layer2.batch_write_item(self)
+
+    def to_dict(self):
+        """
+        Convert a BatchWriteList object into format required for Layer1.
+        """
+        d = {}
+        for batch in self:
+            table_name, batch_dict = batch.to_dict()
+            d[table_name] = batch_dict
+        return d
diff --git a/ext/boto/dynamodb/condition.py b/ext/boto/dynamodb/condition.py
new file mode 100644
index 0000000000..f5db538c29
--- /dev/null
+++ b/ext/boto/dynamodb/condition.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.types import dynamize_value
+
+
+class Condition(object):
+    """
+    Base class for conditions. Doesn't do a darn thing but allows
+    us to test if something is a Condition instance or not.
+    """
+
+    def __eq__(self, other):
+        if isinstance(other, Condition):
+            return self.to_dict() == other.to_dict()
+
+
+class ConditionNoArgs(Condition):
+    """
+    Abstract class for Conditions that require no arguments, such
+    as NULL or NOT_NULL.
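Putting the two container classes to work — a sketch of a batched write followed by a batched read that drains UnprocessedKeys via resubmit(). Here layer2 is a boto.dynamodb.layer2.Layer2 instance (vendored later in this patch), and users_table and the Item objects are assumed to exist:

from boto.dynamodb.batch import BatchList, BatchWriteList

writes = BatchWriteList(layer2)
writes.add_batch(users_table, puts=[item_a, item_b],
                 deletes=[('user-3', 1)])
writes.submit()

reads = BatchList(layer2)
reads.add_batch(users_table, keys=[('user-1', 1), ('user-2', 7)])
result = reads.submit()
# DynamoDB may answer with partial results; resubmit() rebuilds the
# request from UnprocessedKeys until nothing is left over.
while reads.unprocessed:
    result = reads.resubmit()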
+    """
+
+    def __repr__(self):
+        return '%s' % self.__class__.__name__
+
+    def to_dict(self):
+        return {'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionOneArg(Condition):
+    """
+    Abstract class for Conditions that require a single argument
+    such as EQ or NE.
+    """
+
+    def __init__(self, v1):
+        self.v1 = v1
+
+    def __repr__(self):
+        return '%s:%s' % (self.__class__.__name__, self.v1)
+
+    def to_dict(self):
+        return {'AttributeValueList': [dynamize_value(self.v1)],
+                'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionTwoArgs(Condition):
+    """
+    Abstract class for Conditions that require two arguments.
+    The only example of this currently is BETWEEN.
+    """
+
+    def __init__(self, v1, v2):
+        self.v1 = v1
+        self.v2 = v2
+
+    def __repr__(self):
+        return '%s(%s, %s)' % (self.__class__.__name__, self.v1, self.v2)
+
+    def to_dict(self):
+        values = (self.v1, self.v2)
+        return {'AttributeValueList': [dynamize_value(v) for v in values],
+                'ComparisonOperator': self.__class__.__name__}
+
+
+class ConditionSeveralArgs(Condition):
+    """
+    Abstract class for conditions that require several arguments (ex: IN).
+    """
+
+    def __init__(self, values):
+        self.values = values
+
+    def __repr__(self):
+        return '{0}({1})'.format(self.__class__.__name__,
+                                 ', '.join(self.values))
+
+    def to_dict(self):
+        return {'AttributeValueList': [dynamize_value(v) for v in self.values],
+                'ComparisonOperator': self.__class__.__name__}
+
+
+class EQ(ConditionOneArg):
+
+    pass
+
+
+class NE(ConditionOneArg):
+
+    pass
+
+
+class LE(ConditionOneArg):
+
+    pass
+
+
+class LT(ConditionOneArg):
+
+    pass
+
+
+class GE(ConditionOneArg):
+
+    pass
+
+
+class GT(ConditionOneArg):
+
+    pass
+
+
+class NULL(ConditionNoArgs):
+
+    pass
+
+
+class NOT_NULL(ConditionNoArgs):
+
+    pass
+
+
+class CONTAINS(ConditionOneArg):
+
+    pass
+
+
+class NOT_CONTAINS(ConditionOneArg):
+
+    pass
+
+
+class BEGINS_WITH(ConditionOneArg):
+
+    pass
+
+
+class IN(ConditionSeveralArgs):
+
+    pass
+
+
+class BETWEEN(ConditionTwoArgs):
+
+    pass
diff --git a/ext/boto/dynamodb/exceptions.py b/ext/boto/dynamodb/exceptions.py
new file mode 100644
index 0000000000..12be2d72b5
--- /dev/null
+++ b/ext/boto/dynamodb/exceptions.py
@@ -0,0 +1,64 @@
+"""
+Exceptions that are specific to the dynamodb module.
+"""
+from boto.exception import BotoServerError, BotoClientError
+from boto.exception import DynamoDBResponseError
+
+
+class DynamoDBExpiredTokenError(BotoServerError):
+    """
+    Raised when a DynamoDB security token expires. This is generally boto's
+    (or the user's) notice to renew their DynamoDB security tokens.
+    """
+    pass
+
+
+class DynamoDBKeyNotFoundError(BotoClientError):
+    """
+    Raised when attempting to retrieve or interact with an item whose key
+    can't be found.
+    """
+    pass
+
+
+class DynamoDBItemError(BotoClientError):
+    """
+    Raised when invalid parameters are passed when creating a
+    new Item in DynamoDB.
+    """
+    pass
+
+
+class DynamoDBNumberError(BotoClientError):
+    """
+    Raised in the event of incompatible numeric type casting.
+    """
+    pass
+
+
+class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
+    """
+    Raised when a ConditionalCheckFailedException response is received.
+    This happens when a conditional check, expressed via the expected_value
+    parameter, fails.
+    """
+    pass
+
+
+class DynamoDBValidationError(DynamoDBResponseError):
+    """
+    Raised when a ValidationException response is received. This happens
+    when one or more required parameter values are missing, or if the item
+    has exceeded the 64 KB size limit.
+    """
+    pass
+
+
+class DynamoDBThroughputExceededError(DynamoDBResponseError):
+    """
+    Raised when the provisioned throughput has been exceeded.
+    Normally, when provisioned throughput is exceeded the operation
+    is retried. If the retries are exhausted then this exception
+    will be raised.
+    """
+    pass
diff --git a/ext/boto/dynamodb/item.py b/ext/boto/dynamodb/item.py
new file mode 100644
index 0000000000..a47f22bf0e
--- /dev/null
+++ b/ext/boto/dynamodb/item.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.exceptions import DynamoDBItemError
+
+
+class Item(dict):
+    """
+    An item in Amazon DynamoDB.
+
+    :ivar hash_key: The HashKey of this item.
+    :ivar range_key: The RangeKey of this item or None if no RangeKey
+        is defined.
+    :ivar hash_key_name: The name of the HashKey associated with this item.
+    :ivar range_key_name: The name of the RangeKey associated with this item.
+    :ivar table: The Table this item belongs to.
+    """
+
+    def __init__(self, table, hash_key=None, range_key=None, attrs=None):
+        self.table = table
+        self._updates = None
+        self._hash_key_name = self.table.schema.hash_key_name
+        self._range_key_name = self.table.schema.range_key_name
+        if attrs is None:
+            attrs = {}
+        if hash_key is None:
+            hash_key = attrs.get(self._hash_key_name, None)
+        self[self._hash_key_name] = hash_key
+        if self._range_key_name:
+            if range_key is None:
+                range_key = attrs.get(self._range_key_name, None)
+            self[self._range_key_name] = range_key
+        self._updates = {}
+        for key, value in attrs.items():
+            if key != self._hash_key_name and key != self._range_key_name:
+                self[key] = value
+        self.consumed_units = 0
+
+    @property
+    def hash_key(self):
+        return self[self._hash_key_name]
+
+    @property
+    def range_key(self):
+        return self.get(self._range_key_name)
+
+    @property
+    def hash_key_name(self):
+        return self._hash_key_name
+
+    @property
+    def range_key_name(self):
+        return self._range_key_name
+
+    def add_attribute(self, attr_name, attr_value):
+        """
+        Queue the addition of an attribute to an item in DynamoDB.
+        This will eventually result in an UpdateItem request being issued
+        with an update action of ADD when the save method is called.
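A sketch of the queue-then-save model this docstring describes; item is assumed to be an existing boto.dynamodb.item.Item:

# The ADD action is only recorded locally; save() (defined below) sends
# one UpdateItem request applying every queued action at once.
item.add_attribute('view_count', 1)  # server-side atomic increment
item.save()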
+
+        :type attr_name: str
+        :param attr_name: Name of the attribute you want to alter.
+
+        :type attr_value: int|long|float|set
+        :param attr_value: Value which is to be added to the attribute.
+        """
+        self._updates[attr_name] = ("ADD", attr_value)
+
+    def delete_attribute(self, attr_name, attr_value=None):
+        """
+        Queue the deletion of an attribute from an item in DynamoDB.
+        This call will result in an UpdateItem request being issued
+        with an update action of DELETE when the save method is called.
+
+        :type attr_name: str
+        :param attr_name: Name of the attribute you want to alter.
+
+        :type attr_value: set
+        :param attr_value: A set of values to be removed from the attribute.
+            This parameter is optional. If None, the whole attribute is
+            removed from the item.
+        """
+        self._updates[attr_name] = ("DELETE", attr_value)
+
+    def put_attribute(self, attr_name, attr_value):
+        """
+        Queue the putting of an attribute to an item in DynamoDB.
+        This call will result in an UpdateItem request being issued
+        with the update action of PUT when the save method is called.
+
+        :type attr_name: str
+        :param attr_name: Name of the attribute you want to alter.
+
+        :type attr_value: int|long|float|str|set
+        :param attr_value: New value of the attribute.
+        """
+        self._updates[attr_name] = ("PUT", attr_value)
+
+    def save(self, expected_value=None, return_values=None):
+        """
+        Commits pending updates to Amazon DynamoDB.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that
+            you expect. This dictionary should have name/value pairs
+            where the name is the name of the attribute and the value is
+            either the value you are expecting or False if you expect
+            the attribute not to exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute name/value pairs
+            before they were updated. Possible values are: None, 'ALL_OLD',
+            'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content of the old item
+            is returned. If 'ALL_NEW' is specified, then all the attributes of
+            the new version of the item are returned. If 'UPDATED_NEW' is
+            specified, the new versions of only the updated attributes are
+            returned.
+        """
+        return self.table.layer2.update_item(self, expected_value,
+                                             return_values)
+
+    def delete(self, expected_value=None, return_values=None):
+        """
+        Delete the item from DynamoDB.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that
+            you expect. This dictionary should have name/value pairs
+            where the name is the name of the attribute and the value
+            is either the value you are expecting or False if you expect
+            the attribute not to exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        return self.table.layer2.delete_item(self, expected_value,
+                                             return_values)
+
+    def put(self, expected_value=None, return_values=None):
+        """
+        Store a new item or completely replace an existing item
+        in Amazon DynamoDB.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that
+            you expect. This dictionary should have name/value pairs
+            where the name is the name of the attribute and the value
+            is either the value you are expecting or False if you expect
+            the attribute not to exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        return self.table.layer2.put_item(self, expected_value, return_values)
+
+    def __setitem__(self, key, value):
+        """Override the setter to also record the change in the _updates
+        dict so this can act like a normal dict"""
+        if self._updates is not None:
+            self.put_attribute(key, value)
+        dict.__setitem__(self, key, value)
+
+    def __delitem__(self, key):
+        """Remove this key from the items"""
+        if self._updates is not None:
+            self.delete_attribute(key)
+        dict.__delitem__(self, key)
+
+    # Allow this item to still be pickled
+    def __getstate__(self):
+        return self.__dict__
+    def __setstate__(self, d):
+        self.__dict__.update(d)
diff --git a/ext/boto/dynamodb/layer1.py b/ext/boto/dynamodb/layer1.py
new file mode 100644
index 0000000000..0984f71ab4
--- /dev/null
+++ b/ext/boto/dynamodb/layer1.py
@@ -0,0 +1,577 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import time
+from binascii import crc32
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto.exception import DynamoDBResponseError
+from boto.provider import Provider
+from boto.dynamodb import exceptions as dynamodb_exceptions
+from boto.compat import json
+
+
+class Layer1(AWSAuthConnection):
+    """
+    This is the lowest-level interface to DynamoDB. Methods at this
+    layer map directly to API requests and parameters to the methods
+    are either simple, scalar values or they are the Python equivalent
+    of the JSON input as defined in the DynamoDB Developer's Guide.
+    All responses are direct decoding of the JSON response bodies to
+    Python data structures via the json or simplejson modules.
+
+    :ivar throughput_exceeded_events: An integer variable that
+        keeps a running total of the number of ThroughputExceeded
+        responses this connection has received from Amazon DynamoDB.
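Item combines naturally with the conditional-write exception from exceptions.py. A sketch, where users_table and its new_item() helper are the usual Layer2/Table entry points and are assumed here:

from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError

item = users_table.new_item(hash_key='user-1', range_key=1,
                            attrs={'status': 'active'})
item.put()

item['status'] = 'disabled'  # __setitem__ queues a PUT update
try:
    # Apply the update only if the stored item still matches expectations.
    item.save(expected_value={'status': 'active'})
except DynamoDBConditionalCheckFailedError:
    pass  # lost the race; re-read and retry as appropriate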
+ """ + + DefaultRegionName = 'us-east-1' + """The default region name for DynamoDB API.""" + + ServiceName = 'DynamoDB' + """The name of the Service""" + + Version = '20111205' + """DynamoDB API version.""" + + ThruputError = "ProvisionedThroughputExceededException" + """The error response returned when provisioned throughput is exceeded""" + + SessionExpiredError = 'com.amazon.coral.service#ExpiredTokenException' + """The error response returned when session token has expired""" + + ConditionalCheckFailedError = 'ConditionalCheckFailedException' + """The error response returned when a conditional check fails""" + + ValidationError = 'ValidationException' + """The error response returned when an item is invalid in some way""" + + ResponseError = DynamoDBResponseError + + NumberRetries = 10 + """The number of times an error is retried.""" + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, security_token=None, region=None, + validate_certs=True, validate_checksums=True, profile_name=None): + if not region: + region_name = boto.config.get('DynamoDB', 'region', + self.DefaultRegionName) + for reg in boto.dynamodb.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(Layer1, self).__init__(self.region.endpoint, + aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug=debug, security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + self.throughput_exceeded_events = 0 + self._validate_checksums = boto.config.getbool( + 'DynamoDB', 'validate_checksums', validate_checksums) + + def _get_session_token(self): + self.provider = Provider(self._provider_type) + self._auth_handler.update_provider(self.provider) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def make_request(self, action, body='', object_hook=None): + """ + :raises: ``DynamoDBExpiredTokenError`` if the security token expires. + """ + headers = {'X-Amz-Target': '%s_%s.%s' % (self.ServiceName, + self.Version, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.0', + 'Content-Length': str(len(body))} + http_request = self.build_base_http_request('POST', '/', '/', + {}, headers, body, None) + start = time.time() + response = self._mexe(http_request, sender=None, + override_num_retries=self.NumberRetries, + retry_handler=self._retry_handler) + elapsed = (time.time() - start) * 1000 + request_id = response.getheader('x-amzn-RequestId') + boto.log.debug('RequestId: %s' % request_id) + boto.perflog.debug('%s: id=%s time=%sms', + headers['X-Amz-Target'], request_id, int(elapsed)) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + return json.loads(response_body, object_hook=object_hook) + + def _retry_handler(self, response, i, next_sleep): + status = None + if response.status == 400: + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + data = json.loads(response_body) + if self.ThruputError in data.get('__type'): + self.throughput_exceeded_events += 1 + msg = "%s, retry attempt %s" % (self.ThruputError, i) + next_sleep = self._exponential_time(i) + i += 1 + status = (msg, i, next_sleep) + if i == self.NumberRetries: + # If this was our last retry attempt, raise + # a specific error saying that the throughput + # was exceeded. 
+ raise dynamodb_exceptions.DynamoDBThroughputExceededError( + response.status, response.reason, data) + elif self.SessionExpiredError in data.get('__type'): + msg = 'Renewing Session Token' + self._get_session_token() + status = (msg, i + self.num_retries - 1, 0) + elif self.ConditionalCheckFailedError in data.get('__type'): + raise dynamodb_exceptions.DynamoDBConditionalCheckFailedError( + response.status, response.reason, data) + elif self.ValidationError in data.get('__type'): + raise dynamodb_exceptions.DynamoDBValidationError( + response.status, response.reason, data) + else: + raise self.ResponseError(response.status, response.reason, + data) + expected_crc32 = response.getheader('x-amz-crc32') + if self._validate_checksums and expected_crc32 is not None: + boto.log.debug('Validating crc32 checksum for body: %s', + response.read().decode('utf-8')) + actual_crc32 = crc32(response.read()) & 0xffffffff + expected_crc32 = int(expected_crc32) + if actual_crc32 != expected_crc32: + msg = ("The calculated checksum %s did not match the expected " + "checksum %s" % (actual_crc32, expected_crc32)) + status = (msg, i + 1, self._exponential_time(i)) + return status + + def _exponential_time(self, i): + if i == 0: + next_sleep = 0 + else: + next_sleep = min(0.05 * (2 ** i), + boto.config.get('Boto', 'max_retry_delay', 60)) + return next_sleep + + def list_tables(self, limit=None, start_table=None): + """ + Returns a dictionary of results. The dictionary contains + a **TableNames** key whose value is a list of the table names. + The dictionary could also contain a **LastEvaluatedTableName** + key whose value would be the last table name returned if + the complete list of table names was not returned. This + value would then be passed as the ``start_table`` parameter on + a subsequent call to this method. + + :type limit: int + :param limit: The maximum number of tables to return. + + :type start_table: str + :param start_table: The name of the table that starts the + list. If you ran a previous list_tables and not + all results were returned, the response dict would + include a LastEvaluatedTableName attribute. Use + that value here to continue the listing. + """ + data = {} + if limit: + data['Limit'] = limit + if start_table: + data['ExclusiveStartTableName'] = start_table + json_input = json.dumps(data) + return self.make_request('ListTables', json_input) + + def describe_table(self, table_name): + """ + Returns information about the table including current + state of the table, primary key schema and when the + table was created. + + :type table_name: str + :param table_name: The name of the table to describe. + """ + data = {'TableName': table_name} + json_input = json.dumps(data) + return self.make_request('DescribeTable', json_input) + + def create_table(self, table_name, schema, provisioned_throughput): + """ + Add a new table to your account. The table name must be unique + among those associated with the account issuing the request. + This request triggers an asynchronous workflow to begin creating + the table. When the workflow is complete, the state of the + table will be ACTIVE. + + :type table_name: str + :param table_name: The name of the table to create. + + :type schema: dict + :param schema: A Python version of the KeySchema data structure + as defined by DynamoDB + + :type provisioned_throughput: dict + :param provisioned_throughput: A Python version of the + ProvisionedThroughput data structure defined by + DynamoDB. 
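For reference, _exponential_time above produces a capped exponential backoff. The same arithmetic as a standalone sketch, assuming the default 60-second cap:

def backoff(i, cap=60):
    # No delay on the first attempt, then 0.05 * 2**i seconds, capped.
    return 0 if i == 0 else min(0.05 * (2 ** i), cap)

print([backoff(i) for i in range(6)])  # [0, 0.1, 0.2, 0.4, 0.8, 1.6]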
+        """
+        data = {'TableName': table_name,
+                'KeySchema': schema,
+                'ProvisionedThroughput': provisioned_throughput}
+        json_input = json.dumps(data)
+        response_dict = self.make_request('CreateTable', json_input)
+        return response_dict
+
+    def update_table(self, table_name, provisioned_throughput):
+        """
+        Updates the provisioned throughput for a given table.
+
+        :type table_name: str
+        :param table_name: The name of the table to update.
+
+        :type provisioned_throughput: dict
+        :param provisioned_throughput: A Python version of the
+            ProvisionedThroughput data structure defined by
+            DynamoDB.
+        """
+        data = {'TableName': table_name,
+                'ProvisionedThroughput': provisioned_throughput}
+        json_input = json.dumps(data)
+        return self.make_request('UpdateTable', json_input)
+
+    def delete_table(self, table_name):
+        """
+        Deletes the table and all of its data. After this request
+        the table will be in the DELETING state until DynamoDB
+        completes the delete operation.
+
+        :type table_name: str
+        :param table_name: The name of the table to delete.
+        """
+        data = {'TableName': table_name}
+        json_input = json.dumps(data)
+        return self.make_request('DeleteTable', json_input)
+
+    def get_item(self, table_name, key, attributes_to_get=None,
+                 consistent_read=False, object_hook=None):
+        """
+        Return a set of attributes for an item that matches
+        the supplied key.
+
+        :type table_name: str
+        :param table_name: The name of the table containing the item.
+
+        :type key: dict
+        :param key: A Python version of the Key data structure
+            defined by DynamoDB.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+        """
+        data = {'TableName': table_name,
+                'Key': key}
+        if attributes_to_get:
+            data['AttributesToGet'] = attributes_to_get
+        if consistent_read:
+            data['ConsistentRead'] = True
+        json_input = json.dumps(data)
+        response = self.make_request('GetItem', json_input,
+                                     object_hook=object_hook)
+        if 'Item' not in response:
+            raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
+                "Key does not exist."
+            )
+        return response
+
+    def batch_get_item(self, request_items, object_hook=None):
+        """
+        Return a set of attributes for multiple items in
+        multiple tables using their primary keys.
+
+        :type request_items: dict
+        :param request_items: A Python version of the RequestItems
+            data structure defined by DynamoDB.
+        """
+        # If the list is empty, return empty response
+        if not request_items:
+            return {}
+        data = {'RequestItems': request_items}
+        json_input = json.dumps(data)
+        return self.make_request('BatchGetItem', json_input,
+                                 object_hook=object_hook)
+
+    def batch_write_item(self, request_items, object_hook=None):
+        """
+        This operation enables you to put or delete several items
+        across multiple tables in a single API call.
+
+        :type request_items: dict
+        :param request_items: A Python version of the RequestItems
+            data structure defined by DynamoDB.
+        """
+        data = {'RequestItems': request_items}
+        json_input = json.dumps(data)
+        return self.make_request('BatchWriteItem', json_input,
+                                 object_hook=object_hook)
+
+    def put_item(self, table_name, item,
+                 expected=None, return_values=None,
+                 object_hook=None):
+        """
+        Create a new item or replace an old item with a new
+        item (including all attributes). If an item already
+        exists in the specified table with the same primary
+        key, the new item will completely replace the old item.
+        You can perform a conditional put by specifying an
+        expected rule.
+
+        :type table_name: str
+        :param table_name: The name of the table in which to put the item.
+
+        :type item: dict
+        :param item: A Python version of the Item data structure
+            defined by DynamoDB.
+
+        :type expected: dict
+        :param expected: A Python version of the Expected
+            data structure defined by DynamoDB.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        data = {'TableName': table_name,
+                'Item': item}
+        if expected:
+            data['Expected'] = expected
+        if return_values:
+            data['ReturnValues'] = return_values
+        json_input = json.dumps(data)
+        return self.make_request('PutItem', json_input,
+                                 object_hook=object_hook)
+
+    def update_item(self, table_name, key, attribute_updates,
+                    expected=None, return_values=None,
+                    object_hook=None):
+        """
+        Edits an existing item's attributes. You can perform a conditional
+        update (insert a new attribute name-value pair if it doesn't exist,
+        or replace an existing name-value pair if it has certain expected
+        attribute values).
+
+        :type table_name: str
+        :param table_name: The name of the table.
+
+        :type key: dict
+        :param key: A Python version of the Key data structure
+            defined by DynamoDB which identifies the item to be updated.
+
+        :type attribute_updates: dict
+        :param attribute_updates: A Python version of the AttributeUpdates
+            data structure defined by DynamoDB.
+
+        :type expected: dict
+        :param expected: A Python version of the Expected
+            data structure defined by DynamoDB.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        data = {'TableName': table_name,
+                'Key': key,
+                'AttributeUpdates': attribute_updates}
+        if expected:
+            data['Expected'] = expected
+        if return_values:
+            data['ReturnValues'] = return_values
+        json_input = json.dumps(data)
+        return self.make_request('UpdateItem', json_input,
+                                 object_hook=object_hook)
+
+    def delete_item(self, table_name, key,
+                    expected=None, return_values=None,
+                    object_hook=None):
+        """
+        Delete an item and all of its attributes by primary key.
+        You can perform a conditional delete by specifying an
+        expected rule.
+
+        :type table_name: str
+        :param table_name: The name of the table containing the item.
+
+        :type key: dict
+        :param key: A Python version of the Key data structure
+            defined by DynamoDB.
+
+        :type expected: dict
+        :param expected: A Python version of the Expected
+            data structure defined by DynamoDB.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
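A sketch of the raw 2011-12-05 wire format these item methods speak, where keys and values are typed dicts such as {'S': ...}; the 'users' table and its attributes are hypothetical:

from boto.dynamodb.layer1 import Layer1

conn = Layer1()  # credentials/region resolved as in __init__ above

conn.put_item('users', {'user_id': {'S': 'user-1'},
                        'status': {'S': 'active'}})
response = conn.get_item('users', {'HashKeyElement': {'S': 'user-1'}})
conn.delete_item('users', {'HashKeyElement': {'S': 'user-1'}},
                 expected={'status': {'Value': {'S': 'active'}}})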
+        """
+        data = {'TableName': table_name,
+                'Key': key}
+        if expected:
+            data['Expected'] = expected
+        if return_values:
+            data['ReturnValues'] = return_values
+        json_input = json.dumps(data)
+        return self.make_request('DeleteItem', json_input,
+                                 object_hook=object_hook)
+
+    def query(self, table_name, hash_key_value, range_key_conditions=None,
+              attributes_to_get=None, limit=None, consistent_read=False,
+              scan_index_forward=True, exclusive_start_key=None,
+              object_hook=None, count=False):
+        """
+        Perform a query of DynamoDB. This version is currently punting
+        and expecting you to provide a full and correct JSON body
+        which is passed as is to DynamoDB.
+
+        :type table_name: str
+        :param table_name: The name of the table to query.
+
+        :type hash_key_value: dict
+        :param hash_key_value: A DynamoDB-style HashKeyValue.
+
+        :type range_key_conditions: dict
+        :param range_key_conditions: A Python version of the
+            RangeKeyConditions data structure.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type limit: int
+        :param limit: The maximum number of items to return.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Query operation, even if the
+            operation has no matching items for the assigned filter.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+            traversal of the index. Default is forward (True).
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+        """
+        data = {'TableName': table_name,
+                'HashKeyValue': hash_key_value}
+        if range_key_conditions:
+            data['RangeKeyCondition'] = range_key_conditions
+        if attributes_to_get:
+            data['AttributesToGet'] = attributes_to_get
+        if limit:
+            data['Limit'] = limit
+        if count:
+            data['Count'] = True
+        if consistent_read:
+            data['ConsistentRead'] = True
+        if scan_index_forward:
+            data['ScanIndexForward'] = True
+        else:
+            data['ScanIndexForward'] = False
+        if exclusive_start_key:
+            data['ExclusiveStartKey'] = exclusive_start_key
+        json_input = json.dumps(data)
+        return self.make_request('Query', json_input,
+                                 object_hook=object_hook)
+
+    def scan(self, table_name, scan_filter=None,
+             attributes_to_get=None, limit=None,
+             exclusive_start_key=None, object_hook=None, count=False):
+        """
+        Perform a scan of DynamoDB. This version is currently punting
+        and expecting you to provide a full and correct JSON body
+        which is passed as is to DynamoDB.
+
+        :type table_name: str
+        :param table_name: The name of the table to scan.
+
+        :type scan_filter: dict
+        :param scan_filter: A Python version of the
+            ScanFilter data structure.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type limit: int
+        :param limit: The maximum number of items to evaluate.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Scan operation, even if the
+            operation has no matching items for the assigned filter.
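The matching query/scan request shapes; note the range-key condition dict is exactly what the Condition.to_dict() helpers in condition.py emit:

results = conn.query(
    'users',
    hash_key_value={'S': 'user-1'},
    range_key_conditions={'AttributeValueList': [{'N': '10'}],
                          'ComparisonOperator': 'GT'},
    limit=25)

page = conn.scan(
    'users',
    scan_filter={'status': {'AttributeValueList': [{'S': 'active'}],
                            'ComparisonOperator': 'EQ'}})
# If 'LastEvaluatedKey' is present in the response, pass it back as
# exclusive_start_key to fetch the next page.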
+ + :type exclusive_start_key: list or tuple + :param exclusive_start_key: Primary key of the item from + which to continue an earlier query. This would be + provided as the LastEvaluatedKey in that query. + """ + data = {'TableName': table_name} + if scan_filter: + data['ScanFilter'] = scan_filter + if attributes_to_get: + data['AttributesToGet'] = attributes_to_get + if limit: + data['Limit'] = limit + if count: + data['Count'] = True + if exclusive_start_key: + data['ExclusiveStartKey'] = exclusive_start_key + json_input = json.dumps(data) + return self.make_request('Scan', json_input, object_hook=object_hook) diff --git a/ext/boto/dynamodb/layer2.py b/ext/boto/dynamodb/layer2.py new file mode 100644 index 0000000000..9510d49927 --- /dev/null +++ b/ext/boto/dynamodb/layer2.py @@ -0,0 +1,806 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.dynamodb.layer1 import Layer1 +from boto.dynamodb.table import Table +from boto.dynamodb.schema import Schema +from boto.dynamodb.item import Item +from boto.dynamodb.batch import BatchList, BatchWriteList +from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \ + LossyFloatDynamizer, NonBooleanDynamizer + + +class TableGenerator(object): + """ + This is an object that wraps up the table_generator function. + The only real reason to have this is that we want to be able + to accumulate and return the ConsumedCapacityUnits element that + is part of each response. + + :ivar last_evaluated_key: A sequence representing the key(s) + of the item last evaluated, or None if no additional + results are available. + + :ivar remaining: The remaining quantity of results requested. + + :ivar table: The table to which the call was made. + """ + + def __init__(self, table, callable, remaining, item_class, kwargs): + self.table = table + self.callable = callable + self.remaining = -1 if remaining is None else remaining + self.item_class = item_class + self.kwargs = kwargs + self._consumed_units = 0.0 + self.last_evaluated_key = None + self._count = 0 + self._scanned_count = 0 + self._response = None + + @property + def count(self): + """ + The total number of items retrieved thus far. This value changes with + iteration and even when issuing a call with count=True, it is necessary + to complete the iteration to assert an accurate count value. 
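+
+        Example (a minimal sketch; ``results`` is assumed to be a
+        TableGenerator returned by a query or scan)::
+
+            for item in results:
+                pass
+            total = results.count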
+ """ + self.response + return self._count + + @property + def scanned_count(self): + """ + As above, but representing the total number of items scanned by + DynamoDB, without regard to any filters. + """ + self.response + return self._scanned_count + + @property + def consumed_units(self): + """ + Returns a float representing the ConsumedCapacityUnits accumulated. + """ + self.response + return self._consumed_units + + @property + def response(self): + """ + The current response to the call from DynamoDB. + """ + return self.next_response() if self._response is None else self._response + + def next_response(self): + """ + Issue a call and return the result. You can invoke this method + while iterating over the TableGenerator in order to skip to the + next "page" of results. + """ + # preserve any existing limit in case the user alters self.remaining + limit = self.kwargs.get('limit') + if (self.remaining > 0 and (limit is None or limit > self.remaining)): + self.kwargs['limit'] = self.remaining + self._response = self.callable(**self.kwargs) + self.kwargs['limit'] = limit + self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0) + self._count += self._response.get('Count', 0) + self._scanned_count += self._response.get('ScannedCount', 0) + # at the expense of a possibly gratuitous dynamize, ensure that + # early generator termination won't result in bad LEK values + if 'LastEvaluatedKey' in self._response: + lek = self._response['LastEvaluatedKey'] + esk = self.table.layer2.dynamize_last_evaluated_key(lek) + self.kwargs['exclusive_start_key'] = esk + lektuple = (lek['HashKeyElement'],) + if 'RangeKeyElement' in lek: + lektuple += (lek['RangeKeyElement'],) + self.last_evaluated_key = lektuple + else: + self.last_evaluated_key = None + return self._response + + def __iter__(self): + while self.remaining != 0: + response = self.response + for item in response.get('Items', []): + self.remaining -= 1 + yield self.item_class(self.table, attrs=item) + if self.remaining == 0: + break + if response is not self._response: + break + else: + if self.last_evaluated_key is not None: + self.next_response() + continue + break + if response is not self._response: + continue + break + + +class Layer2(object): + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, security_token=None, region=None, + validate_certs=True, dynamizer=LossyFloatDynamizer, + profile_name=None): + self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug, security_token, region, + validate_certs=validate_certs, + profile_name=profile_name) + self.dynamizer = dynamizer() + + def use_decimals(self, use_boolean=False): + """ + Use the ``decimal.Decimal`` type for encoding/decoding numeric types. + + By default, ints/floats are used to represent numeric types + ('N', 'NS') received from DynamoDB. Using the ``Decimal`` + type is recommended to prevent loss of precision. + + """ + # Eventually this should be made the default dynamizer. + self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer() + + def dynamize_attribute_updates(self, pending_updates): + """ + Convert a set of pending item updates into the structure + required by Layer1. 
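+
+        Example (a minimal sketch; the attribute names and pending
+        update actions shown here are hypothetical)::
+
+            pending = {'views': ('ADD', 1), 'obsolete': ('DELETE', None)}
+            layer2.dynamize_attribute_updates(pending)
+            # {'views': {'Action': 'ADD', 'Value': {'N': '1'}},
+            #  'obsolete': {'Action': 'DELETE'}}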
+        """
+        d = {}
+        for attr_name in pending_updates:
+            action, value = pending_updates[attr_name]
+            if value is None:
+                # DELETE without an attribute value
+                d[attr_name] = {"Action": action}
+            else:
+                d[attr_name] = {"Action": action,
+                                "Value": self.dynamizer.encode(value)}
+        return d
+
+    def dynamize_item(self, item):
+        d = {}
+        for attr_name in item:
+            d[attr_name] = self.dynamizer.encode(item[attr_name])
+        return d
+
+    def dynamize_range_key_condition(self, range_key_condition):
+        """
+        Convert a layer2 range_key_condition parameter into the
+        structure required by Layer1.
+        """
+        return range_key_condition.to_dict()
+
+    def dynamize_scan_filter(self, scan_filter):
+        """
+        Convert a layer2 scan_filter parameter into the
+        structure required by Layer1.
+        """
+        d = None
+        if scan_filter:
+            d = {}
+            for attr_name in scan_filter:
+                condition = scan_filter[attr_name]
+                d[attr_name] = condition.to_dict()
+        return d
+
+    def dynamize_expected_value(self, expected_value):
+        """
+        Convert an expected_value parameter into the data structure
+        required for Layer1.
+        """
+        d = None
+        if expected_value:
+            d = {}
+            for attr_name in expected_value:
+                attr_value = expected_value[attr_name]
+                if attr_value is True:
+                    attr_value = {'Exists': True}
+                elif attr_value is False:
+                    attr_value = {'Exists': False}
+                else:
+                    val = self.dynamizer.encode(expected_value[attr_name])
+                    attr_value = {'Value': val}
+                d[attr_name] = attr_value
+        return d
+
+    def dynamize_last_evaluated_key(self, last_evaluated_key):
+        """
+        Convert a last_evaluated_key parameter into the data structure
+        required for Layer1.
+        """
+        d = None
+        if last_evaluated_key:
+            hash_key = last_evaluated_key['HashKeyElement']
+            d = {'HashKeyElement': self.dynamizer.encode(hash_key)}
+            if 'RangeKeyElement' in last_evaluated_key:
+                range_key = last_evaluated_key['RangeKeyElement']
+                d['RangeKeyElement'] = self.dynamizer.encode(range_key)
+        return d
+
+    def build_key_from_values(self, schema, hash_key, range_key=None):
+        """
+        Build a Key structure to be used for accessing items
+        in Amazon DynamoDB. This method takes the supplied hash_key
+        and optional range_key and validates them against the
+        schema. If there is a mismatch, a TypeError is raised.
+        Otherwise, a Python dict version of an Amazon DynamoDB Key
+        data structure is returned.
+
+        :type hash_key: int|float|str|unicode|Binary
+        :param hash_key: The hash key of the item you are looking for.
+            The type of the hash key should match the type defined in
+            the schema.
+
+        :type range_key: int|float|str|unicode|Binary
+        :param range_key: The range key of the item you are looking for.
+            This should be supplied only if the schema requires a
+            range key. The type of the range key should match the
+            type defined in the schema.
+        """
+        dynamodb_key = {}
+        dynamodb_value = self.dynamizer.encode(hash_key)
+        if list(dynamodb_value.keys())[0] != schema.hash_key_type:
+            msg = 'Hashkey must be of type: %s' % schema.hash_key_type
+            raise TypeError(msg)
+        dynamodb_key['HashKeyElement'] = dynamodb_value
+        if range_key is not None:
+            dynamodb_value = self.dynamizer.encode(range_key)
+            if list(dynamodb_value.keys())[0] != schema.range_key_type:
+                msg = 'RangeKey must be of type: %s' % schema.range_key_type
+                raise TypeError(msg)
+            dynamodb_key['RangeKeyElement'] = dynamodb_value
+        return dynamodb_key
+
+    def new_batch_list(self):
+        """
+        Return a new, empty :class:`boto.dynamodb.batch.BatchList`
+        object.
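+
+        Example usage (a minimal sketch; ``table`` and the keys shown
+        here are hypothetical, and the table is assumed to have a
+        hash-key-only schema)::
+
+            batch_list = layer2.new_batch_list()
+            batch_list.add_batch(table, ['key1', 'key2'])
+            response = layer2.batch_get_item(batch_list)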
+ """ + return BatchList(self) + + def new_batch_write_list(self): + """ + Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList` + object. + """ + return BatchWriteList(self) + + def list_tables(self, limit=None): + """ + Return a list of the names of all tables associated with the + current account and region. + + :type limit: int + :param limit: The maximum number of tables to return. + """ + tables = [] + start_table = None + while not limit or len(tables) < limit: + this_round_limit = None + if limit: + this_round_limit = limit - len(tables) + this_round_limit = min(this_round_limit, 100) + result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table) + tables.extend(result.get('TableNames', [])) + start_table = result.get('LastEvaluatedTableName', None) + if not start_table: + break + return tables + + def describe_table(self, name): + """ + Retrieve information about an existing table. + + :type name: str + :param name: The name of the desired table. + + """ + return self.layer1.describe_table(name) + + def table_from_schema(self, name, schema): + """ + Create a Table object from a schema. + + This method will create a Table object without + making any API calls. If you know the name and schema + of the table, you can use this method instead of + ``get_table``. + + Example usage:: + + table = layer2.table_from_schema( + 'tablename', + Schema.create(hash_key=('foo', 'N'))) + + :type name: str + :param name: The name of the table. + + :type schema: :class:`boto.dynamodb.schema.Schema` + :param schema: The schema associated with the table. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the table. + + """ + return Table.create_from_schema(self, name, schema) + + def get_table(self, name): + """ + Retrieve the Table object for an existing table. + + :type name: str + :param name: The name of the desired table. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the table. + """ + response = self.layer1.describe_table(name) + return Table(self, response) + + lookup = get_table + + def create_table(self, name, schema, read_units, write_units): + """ + Create a new Amazon DynamoDB table. + + :type name: str + :param name: The name of the desired table. + + :type schema: :class:`boto.dynamodb.schema.Schema` + :param schema: The Schema object that defines the schema used + by this table. + + :type read_units: int + :param read_units: The value for ReadCapacityUnits. + + :type write_units: int + :param write_units: The value for WriteCapacityUnits. + + :rtype: :class:`boto.dynamodb.table.Table` + :return: A Table object representing the new Amazon DynamoDB table. + """ + response = self.layer1.create_table(name, schema.dict, + {'ReadCapacityUnits': read_units, + 'WriteCapacityUnits': write_units}) + return Table(self, response) + + def update_throughput(self, table, read_units, write_units): + """ + Update the ProvisionedThroughput for the Amazon DynamoDB Table. + + :type table: :class:`boto.dynamodb.table.Table` + :param table: The Table object whose throughput is being updated. + + :type read_units: int + :param read_units: The new value for ReadCapacityUnits. + + :type write_units: int + :param write_units: The new value for WriteCapacityUnits. + """ + response = self.layer1.update_table(table.name, + {'ReadCapacityUnits': read_units, + 'WriteCapacityUnits': write_units}) + table.update_from_response(response) + + def delete_table(self, table): + """ + Delete this table and all items in it. 
After calling this
+        the Table object's status attribute will be set to 'DELETING'.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object that is being deleted.
+        """
+        response = self.layer1.delete_table(table.name)
+        table.update_from_response(response)
+
+    def create_schema(self, hash_key_name, hash_key_proto_value,
+                      range_key_name=None, range_key_proto_value=None):
+        """
+        Create a Schema object used when creating a Table.
+
+        :type hash_key_name: str
+        :param hash_key_name: The name of the HashKey for the schema.
+
+        :type hash_key_proto_value: int|long|float|str|unicode|Binary
+        :param hash_key_proto_value: A sample or prototype of the type
+            of value you want to use for the HashKey. Alternatively,
+            you can also just pass in the Python type (e.g. int, float, etc.).
+
+        :type range_key_name: str
+        :param range_key_name: The name of the RangeKey for the schema.
+            This parameter is optional.
+
+        :type range_key_proto_value: int|long|float|str|unicode|Binary
+        :param range_key_proto_value: A sample or prototype of the type
+            of value you want to use for the RangeKey. Alternatively,
+            you can also pass in the Python type (e.g. int, float, etc.)
+            This parameter is optional.
+        """
+        hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value))
+        if range_key_name and range_key_proto_value is not None:
+            range_key = (range_key_name,
+                         get_dynamodb_type(range_key_proto_value))
+        else:
+            range_key = None
+        return Schema.create(hash_key, range_key)
+
+    def get_item(self, table, hash_key, range_key=None,
+                 attributes_to_get=None, consistent_read=False,
+                 item_class=Item):
+        """
+        Retrieve an existing item from the table.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object from which the item is retrieved.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the requested item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        key = self.build_key_from_values(table.schema, hash_key, range_key)
+        response = self.layer1.get_item(table.name, key,
+                                        attributes_to_get, consistent_read,
+                                        object_hook=self.dynamizer.decode)
+        item = item_class(table, hash_key, range_key, response['Item'])
+        if 'ConsumedCapacityUnits' in response:
+            item.consumed_units = response['ConsumedCapacityUnits']
+        return item
+
+    def batch_get_item(self, batch_list):
+        """
+        Return a set of attributes for multiple items in
+        multiple tables using their primary keys.
+
+        :type batch_list: :class:`boto.dynamodb.batch.BatchList`
+        :param batch_list: A BatchList object which consists of a
+            list of :class:`boto.dynamodb.batch.Batch` objects.
+            Each Batch object contains the information about one
+            batch of objects that you wish to retrieve in this
+            request.
+        """
+        request_items = batch_list.to_dict()
+        return self.layer1.batch_get_item(request_items,
+                                          object_hook=self.dynamizer.decode)
+
+    def batch_write_item(self, batch_list):
+        """
+        Performs multiple Puts and Deletes in one batch.
+
+        :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
+        :param batch_list: A BatchWriteList object which consists of a
+            list of :class:`boto.dynamodb.batch.BatchWrite` objects.
+            Each Batch object contains the information about one
+            batch of objects that you wish to put or delete.
+        """
+        request_items = batch_list.to_dict()
+        return self.layer1.batch_write_item(request_items,
+                                            object_hook=self.dynamizer.decode)
+
+    def put_item(self, item, expected_value=None, return_values=None):
+        """
+        Store a new item or completely replace an existing item
+        in Amazon DynamoDB.
+
+        :type item: :class:`boto.dynamodb.item.Item`
+        :param item: The Item to write to Amazon DynamoDB.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that you expect.
+            This dictionary should have name/value pairs where the name
+            is the name of the attribute and the value is either the value
+            you are expecting or False if you expect the attribute not to
+            exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        expected_value = self.dynamize_expected_value(expected_value)
+        response = self.layer1.put_item(item.table.name,
+                                        self.dynamize_item(item),
+                                        expected_value, return_values,
+                                        object_hook=self.dynamizer.decode)
+        if 'ConsumedCapacityUnits' in response:
+            item.consumed_units = response['ConsumedCapacityUnits']
+        return response
+
+    def update_item(self, item, expected_value=None, return_values=None):
+        """
+        Commit pending item updates to Amazon DynamoDB.
+
+        :type item: :class:`boto.dynamodb.item.Item`
+        :param item: The Item to update in Amazon DynamoDB. It is expected
+            that you would have called the add_attribute, put_attribute
+            and/or delete_attribute methods on this Item prior to calling
+            this method. Those queued changes are what will be updated.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that you
+            expect. This dictionary should have name/value pairs where the
+            name is the name of the attribute and the value is either the
+            value you are expecting or False if you expect the attribute
+            not to exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute name/value pairs
+            before they were updated. Possible values are: None, 'ALL_OLD',
+            'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content of the old item
+            is returned. If 'ALL_NEW' is specified, then all the attributes of
+            the new version of the item are returned. If 'UPDATED_NEW' is
+            specified, the new versions of only the updated attributes are
+            returned.
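+
+        Example usage (a minimal sketch; the hash key and attribute
+        names shown here are hypothetical)::
+
+            item = table.get_item(hash_key='msg-1')
+            item.put_attribute('status', 'read')
+            layer2.update_item(item)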
+
+        """
+        expected_value = self.dynamize_expected_value(expected_value)
+        key = self.build_key_from_values(item.table.schema,
+                                         item.hash_key, item.range_key)
+        attr_updates = self.dynamize_attribute_updates(item._updates)
+
+        response = self.layer1.update_item(item.table.name, key,
+                                           attr_updates,
+                                           expected_value, return_values,
+                                           object_hook=self.dynamizer.decode)
+        item._updates.clear()
+        if 'ConsumedCapacityUnits' in response:
+            item.consumed_units = response['ConsumedCapacityUnits']
+        return response
+
+    def delete_item(self, item, expected_value=None, return_values=None):
+        """
+        Delete the item from Amazon DynamoDB.
+
+        :type item: :class:`boto.dynamodb.item.Item`
+        :param item: The Item to delete from Amazon DynamoDB.
+
+        :type expected_value: dict
+        :param expected_value: A dictionary of name/value pairs that you expect.
+            This dictionary should have name/value pairs where the name
+            is the name of the attribute and the value is either the value
+            you are expecting or False if you expect the attribute not to
+            exist.
+
+        :type return_values: str
+        :param return_values: Controls the return of attribute
+            name-value pairs before they were changed. Possible
+            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
+            specified and the item is overwritten, the content
+            of the old item is returned.
+        """
+        expected_value = self.dynamize_expected_value(expected_value)
+        key = self.build_key_from_values(item.table.schema,
+                                         item.hash_key, item.range_key)
+        return self.layer1.delete_item(item.table.name, key,
+                                       expected=expected_value,
+                                       return_values=return_values,
+                                       object_hook=self.dynamizer.decode)
+
+    def query(self, table, hash_key, range_key_condition=None,
+              attributes_to_get=None, request_limit=None,
+              max_results=None, consistent_read=False,
+              scan_index_forward=True, exclusive_start_key=None,
+              item_class=Item, count=False):
+        """
+        Perform a query on the table.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object that is being queried.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
+        :param range_key_condition: A Condition object.
+            Condition object can be one of the following types:
+
+            EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
+
+            The only condition which expects or will accept two
+            values is 'BETWEEN', otherwise a single value should
+            be passed to the Condition constructor.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            query, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+            traversal of the index. Default is forward (True).
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Query operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        if range_key_condition:
+            rkc = self.dynamize_range_key_condition(range_key_condition)
+        else:
+            rkc = None
+        if exclusive_start_key:
+            esk = self.build_key_from_values(table.schema,
+                                             *exclusive_start_key)
+        else:
+            esk = None
+        kwargs = {'table_name': table.name,
+                  'hash_key_value': self.dynamizer.encode(hash_key),
+                  'range_key_conditions': rkc,
+                  'attributes_to_get': attributes_to_get,
+                  'limit': request_limit,
+                  'count': count,
+                  'consistent_read': consistent_read,
+                  'scan_index_forward': scan_index_forward,
+                  'exclusive_start_key': esk,
+                  'object_hook': self.dynamizer.decode}
+        return TableGenerator(table, self.layer1.query,
+                              max_results, item_class, kwargs)
+
+    def scan(self, table, scan_filter=None,
+             attributes_to_get=None, request_limit=None, max_results=None,
+             exclusive_start_key=None, item_class=Item, count=False):
+        """
+        Perform a scan of DynamoDB.
+
+        :type table: :class:`boto.dynamodb.table.Table`
+        :param table: The Table object that is being scanned.
+
+        :type scan_filter: dict
+        :param scan_filter: A dictionary where the key is the
+            attribute name and the value is a
+            :class:`boto.dynamodb.condition.Condition` object.
+            Valid Condition objects include:
+
+            * EQ - equal (1)
+            * NE - not equal (1)
+            * LE - less than or equal (1)
+            * LT - less than (1)
+            * GE - greater than or equal (1)
+            * GT - greater than (1)
+            * NOT_NULL - attribute exists (0, use None)
+            * NULL - attribute does not exist (0, use None)
+            * CONTAINS - substring or value in list (1)
+            * NOT_CONTAINS - absence of substring or value in list (1)
+            * BEGINS_WITH - substring prefix (1)
+            * IN - exact match in list (N)
+            * BETWEEN - >= first value, <= second value (2)
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total.
For example,
+            if you only wanted to see the first 100 results from the
+            query, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Scan operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        if exclusive_start_key:
+            esk = self.build_key_from_values(table.schema,
+                                             *exclusive_start_key)
+        else:
+            esk = None
+        kwargs = {'table_name': table.name,
+                  'scan_filter': self.dynamize_scan_filter(scan_filter),
+                  'attributes_to_get': attributes_to_get,
+                  'limit': request_limit,
+                  'count': count,
+                  'exclusive_start_key': esk,
+                  'object_hook': self.dynamizer.decode}
+        return TableGenerator(table, self.layer1.scan,
+                              max_results, item_class, kwargs)
diff --git a/ext/boto/dynamodb/schema.py b/ext/boto/dynamodb/schema.py
new file mode 100644
index 0000000000..4a697a827d
--- /dev/null
+++ b/ext/boto/dynamodb/schema.py
@@ -0,0 +1,112 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+class Schema(object):
+    """
+    Represents a DynamoDB schema.
+
+    :ivar hash_key_name: The name of the hash key of the schema.
+    :ivar hash_key_type: The DynamoDB type specification for the
+        hash key of the schema.
+    :ivar range_key_name: The name of the range key of the schema
+        or None if no range key is defined.
+    :ivar range_key_type: The DynamoDB type specification for the
+        range key of the schema or None if no range key is defined.
+    :ivar dict: The underlying Python dictionary that needs to be
+        passed to Layer1 methods.
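+
+    Example usage (a minimal sketch; the key names are hypothetical)::
+
+        schema = Schema.create(hash_key=('foo', 'N'),
+                               range_key=('bar', 'S'))
+        schema.hash_key_name    # 'foo'
+        schema.range_key_type   # 'S'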
+    """
+
+    def __init__(self, schema_dict):
+        self._dict = schema_dict
+
+    def __repr__(self):
+        if self.range_key_name:
+            s = 'Schema(%s:%s)' % (self.hash_key_name, self.range_key_name)
+        else:
+            s = 'Schema(%s)' % self.hash_key_name
+        return s
+
+    @classmethod
+    def create(cls, hash_key, range_key=None):
+        """Convenience method to create a schema object.
+
+        Example usage::
+
+            schema = Schema.create(hash_key=('foo', 'N'))
+            schema2 = Schema.create(hash_key=('foo', 'N'),
+                                    range_key=('bar', 'S'))
+
+        :type hash_key: tuple
+        :param hash_key: A tuple of (hash_key_name, hash_key_type)
+
+        :type range_key: tuple
+        :param range_key: A tuple of (range_key_name, range_key_type)
+
+        """
+        reconstructed = {
+            'HashKeyElement': {
+                'AttributeName': hash_key[0],
+                'AttributeType': hash_key[1],
+            }
+        }
+        if range_key is not None:
+            reconstructed['RangeKeyElement'] = {
+                'AttributeName': range_key[0],
+                'AttributeType': range_key[1],
+            }
+        instance = cls(None)
+        instance._dict = reconstructed
+        return instance
+
+    @property
+    def dict(self):
+        return self._dict
+
+    @property
+    def hash_key_name(self):
+        return self._dict['HashKeyElement']['AttributeName']
+
+    @property
+    def hash_key_type(self):
+        return self._dict['HashKeyElement']['AttributeType']
+
+    @property
+    def range_key_name(self):
+        name = None
+        if 'RangeKeyElement' in self._dict:
+            name = self._dict['RangeKeyElement']['AttributeName']
+        return name
+
+    @property
+    def range_key_type(self):
+        type = None
+        if 'RangeKeyElement' in self._dict:
+            type = self._dict['RangeKeyElement']['AttributeType']
+        return type
+
+    def __eq__(self, other):
+        return (self.hash_key_name == other.hash_key_name and
+                self.hash_key_type == other.hash_key_type and
+                self.range_key_name == other.range_key_name and
+                self.range_key_type == other.range_key_type)
diff --git a/ext/boto/dynamodb/table.py b/ext/boto/dynamodb/table.py
new file mode 100644
index 0000000000..152b95d908
--- /dev/null
+++ b/ext/boto/dynamodb/table.py
@@ -0,0 +1,546 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.dynamodb.batch import BatchList
+from boto.dynamodb.schema import Schema
+from boto.dynamodb.item import Item
+from boto.dynamodb import exceptions as dynamodb_exceptions
+import time
+
+
+class TableBatchGenerator(object):
+    """
+    A low-level generator used to page through results from
+    batch_get_item operations.
+
+    :ivar consumed_units: An integer that holds the number of
+        ConsumedCapacityUnits accumulated thus far for this
+        generator.
+    """
+
+    def __init__(self, table, keys, attributes_to_get=None,
+                 consistent_read=False):
+        self.table = table
+        self.keys = keys
+        self.consumed_units = 0
+        self.attributes_to_get = attributes_to_get
+        self.consistent_read = consistent_read
+
+    def _queue_unprocessed(self, res):
+        if u'UnprocessedKeys' not in res:
+            return
+        if self.table.name not in res[u'UnprocessedKeys']:
+            return
+
+        keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
+
+        for key in keys:
+            h = key[u'HashKeyElement']
+            r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
+            self.keys.append((h, r))
+
+    def __iter__(self):
+        while self.keys:
+            # Build the next batch
+            batch = BatchList(self.table.layer2)
+            batch.add_batch(self.table, self.keys[:100],
+                            self.attributes_to_get)
+            res = batch.submit()
+
+            # parse the results
+            if self.table.name not in res[u'Responses']:
+                continue
+            self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
+            for elem in res[u'Responses'][self.table.name][u'Items']:
+                yield elem
+
+            # re-queue unprocessed keys
+            self.keys = self.keys[100:]
+            self._queue_unprocessed(res)
+
+
+class Table(object):
+    """
+    An Amazon DynamoDB table.
+
+    :ivar name: The name of the table.
+    :ivar create_time: The date and time that the table was created.
+    :ivar status: The current status of the table. One of:
+        'ACTIVE', 'UPDATING', 'DELETING'.
+    :ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
+        the schema defined for the table.
+    :ivar item_count: The number of items in the table. This value is
+        set only when the Table object is created or refreshed and
+        may not reflect the actual count.
+    :ivar size_bytes: Total size of the specified table, in bytes.
+        Amazon DynamoDB updates this value approximately every six hours.
+        Recent changes might not be reflected in this value.
+    :ivar read_units: The ReadCapacityUnits of the table's
+        Provisioned Throughput.
+    :ivar write_units: The WriteCapacityUnits of the table's
+        Provisioned Throughput.
+    """
+
+    def __init__(self, layer2, response):
+        """
+
+        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
+        :param layer2: A `Layer2` api object.
+
+        :type response: dict
+        :param response: The output of
+            `boto.dynamodb.layer1.Layer1.describe_table`.
+
+        """
+        self.layer2 = layer2
+        self._dict = {}
+        self.update_from_response(response)
+
+    @classmethod
+    def create_from_schema(cls, layer2, name, schema):
+        """Create a Table object.
+
+        If you know the name and schema of your table, you can
+        create a ``Table`` object without having to make any
+        API calls (normally an API call is made to retrieve
+        the schema of a table).
+
+        Example usage::
+
+            table = Table.create_from_schema(
+                boto.connect_dynamodb(),
+                'tablename',
+                Schema.create(hash_key=('keyname', 'N')))
+
+        :type layer2: :class:`boto.dynamodb.layer2.Layer2`
+        :param layer2: A ``Layer2`` api object.
+
+        :type name: str
+        :param name: The name of the table.
+
+        :type schema: :class:`boto.dynamodb.schema.Schema`
+        :param schema: The schema associated with the table.
+
+        :rtype: :class:`boto.dynamodb.table.Table`
+        :return: A Table object representing the table.
+
+        """
+        table = cls(layer2, {'Table': {'TableName': name}})
+        table._schema = schema
+        return table
+
+    def __repr__(self):
+        return 'Table(%s)' % self.name
+
+    @property
+    def name(self):
+        return self._dict['TableName']
+
+    @property
+    def create_time(self):
+        return self._dict.get('CreationDateTime', None)
+
+    @property
+    def status(self):
+        return self._dict.get('TableStatus', None)
+
+    @property
+    def item_count(self):
+        return self._dict.get('ItemCount', 0)
+
+    @property
+    def size_bytes(self):
+        return self._dict.get('TableSizeBytes', 0)
+
+    @property
+    def schema(self):
+        return self._schema
+
+    @property
+    def read_units(self):
+        try:
+            return self._dict['ProvisionedThroughput']['ReadCapacityUnits']
+        except KeyError:
+            return None
+
+    @property
+    def write_units(self):
+        try:
+            return self._dict['ProvisionedThroughput']['WriteCapacityUnits']
+        except KeyError:
+            return None
+
+    def update_from_response(self, response):
+        """
+        Update the state of the Table object based on the response
+        data received from Amazon DynamoDB.
+        """
+        # 'Table' is from a describe_table call.
+        if 'Table' in response:
+            self._dict.update(response['Table'])
+        # 'TableDescription' is from a create_table call.
+        elif 'TableDescription' in response:
+            self._dict.update(response['TableDescription'])
+        if 'KeySchema' in self._dict:
+            self._schema = Schema(self._dict['KeySchema'])
+
+    def refresh(self, wait_for_active=False, retry_seconds=5):
+        """
+        Refresh all of the fields of the Table object by calling
+        the underlying DescribeTable request.
+
+        :type wait_for_active: bool
+        :param wait_for_active: If True, this command will not return
+            until the table status, as returned from Amazon DynamoDB, is
+            'ACTIVE'.
+
+        :type retry_seconds: int
+        :param retry_seconds: If wait_for_active is True, this
+            parameter controls the number of seconds of delay between
+            calls to describe_table in Amazon DynamoDB. Default is 5 seconds.
+        """
+        done = False
+        while not done:
+            response = self.layer2.describe_table(self.name)
+            self.update_from_response(response)
+            if wait_for_active:
+                if self.status == 'ACTIVE':
+                    done = True
+                else:
+                    time.sleep(retry_seconds)
+            else:
+                done = True
+
+    def update_throughput(self, read_units, write_units):
+        """
+        Update the ProvisionedThroughput for the Amazon DynamoDB Table.
+
+        :type read_units: int
+        :param read_units: The new value for ReadCapacityUnits.
+
+        :type write_units: int
+        :param write_units: The new value for WriteCapacityUnits.
+        """
+        self.layer2.update_throughput(self, read_units, write_units)
+
+    def delete(self):
+        """
+        Delete this table and all items in it. After calling this
+        the Table object's status attribute will be set to 'DELETING'.
+        """
+        self.layer2.delete_table(self)
+
+    def get_item(self, hash_key, range_key=None,
+                 attributes_to_get=None, consistent_read=False,
+                 item_class=Item):
+        """
+        Retrieve an existing item from the table.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the requested item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return self.layer2.get_item(self, hash_key, range_key,
+                                    attributes_to_get, consistent_read,
+                                    item_class)
+    lookup = get_item
+
+    def has_item(self, hash_key, range_key=None, consistent_read=False):
+        """
+        Checks the table to see if the Item with the specified ``hash_key``
+        exists. This may save a tiny bit of time/bandwidth over a
+        straight :py:meth:`get_item` if you have no intention to touch
+        the data that is returned, since this method specifically tells
+        Amazon not to return anything but the Item's key.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the requested item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :rtype: bool
+        :returns: ``True`` if the Item exists, ``False`` if not.
+        """
+        try:
+            # Attempt to get the key. If it can't be found, it'll raise
+            # an exception.
+            self.get_item(hash_key, range_key=range_key,
+                          # This minimizes the size of the response body.
+                          attributes_to_get=[hash_key],
+                          consistent_read=consistent_read)
+        except dynamodb_exceptions.DynamoDBKeyNotFoundError:
+            # Key doesn't exist.
+            return False
+        return True
+
+    def new_item(self, hash_key=None, range_key=None, attrs=None,
+                 item_class=Item):
+        """
+        Return a new, unsaved Item which can later be PUT to
+        Amazon DynamoDB.
+
+        This method has explicit (but optional) parameters for
+        the hash_key and range_key values of the item. You can use
+        these explicit parameters when calling the method, such as::
+
+            >>> my_item = my_table.new_item(hash_key='a', range_key=1,
+                                    attrs={'key1': 'val1', 'key2': 'val2'})
+            >>> my_item
+            {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+        Or, if you prefer, you can simply put the hash_key and range_key
+        in the attrs dictionary itself, like this::
+
+            >>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
+            >>> my_item = my_table.new_item(attrs=attrs)
+            >>> my_item
+            {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
+
+        The effect is the same.
+
+        .. note:
+            The explicit parameters take priority over the values in
+            the attrs dict. So, if you have a hash_key or range_key
+            in the attrs dict and you also supply either or both using
+            the explicit parameters, the values in the attrs will be
+            ignored.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the new item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key: int|long|float|str|unicode|Binary
+        :param range_key: The optional RangeKey of the new item.
+            The type of the value must match the type defined in the
+            schema for the table.
+
+        :type attrs: dict
+        :param attrs: A dictionary of key value pairs used to
+            populate the new item.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return item_class(self, hash_key, range_key, attrs)
+
+    def query(self, hash_key, *args, **kw):
+        """
+        Perform a query on the table.
+
+        :type hash_key: int|long|float|str|unicode|Binary
+        :param hash_key: The HashKey of the requested item. The
+            type of the value must match the type defined in the
+            schema for the table.
+
+        :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
+        :param range_key_condition: A Condition object.
+            Condition object can be one of the following types:
+
+            EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
+
+            The only condition which expects or will accept two
+            values is 'BETWEEN', otherwise a single value should
+            be passed to the Condition constructor.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            query, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+        :type consistent_read: bool
+        :param consistent_read: If True, a consistent read
+            request is issued. Otherwise, an eventually consistent
+            request is issued.
+
+        :type scan_index_forward: bool
+        :param scan_index_forward: Specifies forward or backward
+            traversal of the index. Default is forward (True).
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Query operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+        """
+        return self.layer2.query(self, hash_key, *args, **kw)
+
+    def scan(self, *args, **kw):
+        """
+        Scan through this table; this is a very long and expensive
+        operation, and should be avoided if at all possible.
+
+        :type scan_filter: dict
+        :param scan_filter: A dictionary where the key is the
+            attribute name and the value is a
+            :class:`boto.dynamodb.condition.Condition` object.
Valid Condition objects include:
+
+            * EQ - equal (1)
+            * NE - not equal (1)
+            * LE - less than or equal (1)
+            * LT - less than (1)
+            * GE - greater than or equal (1)
+            * GT - greater than (1)
+            * NOT_NULL - attribute exists (0, use None)
+            * NULL - attribute does not exist (0, use None)
+            * CONTAINS - substring or value in list (1)
+            * NOT_CONTAINS - absence of substring or value in list (1)
+            * BEGINS_WITH - substring prefix (1)
+            * IN - exact match in list (N)
+            * BETWEEN - >= first value, <= second value (2)
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+
+        :type request_limit: int
+        :param request_limit: The maximum number of items to retrieve
+            from Amazon DynamoDB on each request. You may want to set
+            a specific request_limit based on the provisioned throughput
+            of your table. The default behavior is to retrieve as many
+            results as possible per request.
+
+        :type max_results: int
+        :param max_results: The maximum number of results that will
+            be retrieved from Amazon DynamoDB in total. For example,
+            if you only wanted to see the first 100 results from the
+            query, regardless of how many were actually available, you
+            could set max_results to 100 and the generator returned
+            from the query method will only yield 100 results max.
+
+        :type count: bool
+        :param count: If True, Amazon DynamoDB returns a total
+            number of items for the Scan operation, even if the
+            operation has no matching items for the assigned filter.
+            If count is True, the actual items are not returned and
+            the count is accessible as the ``count`` attribute of
+            the returned object.
+
+        :type exclusive_start_key: list or tuple
+        :param exclusive_start_key: Primary key of the item from
+            which to continue an earlier query. This would be
+            provided as the LastEvaluatedKey in that query.
+
+        :type item_class: Class
+        :param item_class: Allows you to override the class used
+            to generate the items. This should be a subclass of
+            :class:`boto.dynamodb.item.Item`
+
+        :return: A TableGenerator (generator) object which will iterate
+            over all results
+        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
+        """
+        return self.layer2.scan(self, *args, **kw)
+
+    def batch_get_item(self, keys, attributes_to_get=None):
+        """
+        Return a set of attributes for multiple items from a single table
+        using their primary keys. This abstraction removes the 100 Items per
+        batch limitation as well as the "UnprocessedKeys" logic.
+
+        :type keys: list
+        :param keys: A list of scalar or tuple values. Each element in the
+            list represents one Item to retrieve. If the schema for the
+            table has both a HashKey and a RangeKey, each element in the
+            list should be a tuple consisting of (hash_key, range_key). If
+            the schema for the table contains only a HashKey, each element
+            in the list should be a scalar value of the appropriate type
+            for the table schema. NOTE: The maximum number of items that
+            can be retrieved for a single operation is 100. Also, the
+            number of items retrieved is constrained by a 1 MB size limit.
+
+        :type attributes_to_get: list
+        :param attributes_to_get: A list of attribute names.
+            If supplied, only the specified attribute names will
+            be returned. Otherwise, all attributes will be returned.
+ + :return: A TableBatchGenerator (generator) object which will + iterate over all results + :rtype: :class:`boto.dynamodb.table.TableBatchGenerator` + """ + return TableBatchGenerator(self, keys, attributes_to_get) diff --git a/ext/boto/dynamodb/types.py b/ext/boto/dynamodb/types.py new file mode 100644 index 0000000000..d9aaaa4cb3 --- /dev/null +++ b/ext/boto/dynamodb/types.py @@ -0,0 +1,410 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +Some utility functions to deal with mapping Amazon DynamoDB types to +Python types and vice-versa. +""" +import base64 +from decimal import (Decimal, DecimalException, Context, + Clamped, Overflow, Inexact, Underflow, Rounded) +from collections import Mapping +from boto.dynamodb.exceptions import DynamoDBNumberError +from boto.compat import filter, map, six, long_type + + +DYNAMODB_CONTEXT = Context( + Emin=-128, Emax=126, rounding=None, prec=38, + traps=[Clamped, Overflow, Inexact, Rounded, Underflow]) + + +# python2.6 cannot convert floats directly to +# Decimals. This is taken from: +# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq +def float_to_decimal(f): + n, d = f.as_integer_ratio() + numerator, denominator = Decimal(n), Decimal(d) + ctx = DYNAMODB_CONTEXT + result = ctx.divide(numerator, denominator) + while ctx.flags[Inexact]: + ctx.flags[Inexact] = False + ctx.prec *= 2 + result = ctx.divide(numerator, denominator) + return result + + +def is_num(n, boolean_as_int=True): + if boolean_as_int: + types = (int, long_type, float, Decimal, bool) + else: + types = (int, long_type, float, Decimal) + + return isinstance(n, types) or n in types + + +if six.PY2: + def is_str(n): + return (isinstance(n, basestring) or + isinstance(n, type) and issubclass(n, basestring)) + + def is_binary(n): + return isinstance(n, Binary) + +else: # PY3 + def is_str(n): + return (isinstance(n, str) or + isinstance(n, type) and issubclass(n, str)) + + def is_binary(n): + return isinstance(n, bytes) # Binary is subclass of bytes. + + +def serialize_num(val): + """Cast a number to a string and perform + validation to ensure no loss of precision. + """ + if isinstance(val, bool): + return str(int(val)) + return str(val) + + +def convert_num(s): + if '.' 
in s:
+        n = float(s)
+    else:
+        n = int(s)
+    return n
+
+
+def convert_binary(n):
+    return Binary(base64.b64decode(n))
+
+
+def get_dynamodb_type(val, use_boolean=True):
+    """
+    Take a scalar Python value and return a string representing
+    the corresponding Amazon DynamoDB type. If the value passed in is
+    not a supported type, raise a TypeError.
+    """
+    dynamodb_type = None
+    if val is None:
+        dynamodb_type = 'NULL'
+    elif is_num(val):
+        if isinstance(val, bool) and use_boolean:
+            dynamodb_type = 'BOOL'
+        else:
+            dynamodb_type = 'N'
+    elif is_str(val):
+        dynamodb_type = 'S'
+    elif isinstance(val, (set, frozenset)):
+        if False not in map(is_num, val):
+            dynamodb_type = 'NS'
+        elif False not in map(is_str, val):
+            dynamodb_type = 'SS'
+        elif False not in map(is_binary, val):
+            dynamodb_type = 'BS'
+    elif is_binary(val):
+        dynamodb_type = 'B'
+    elif isinstance(val, Mapping):
+        dynamodb_type = 'M'
+    elif isinstance(val, list):
+        dynamodb_type = 'L'
+    if dynamodb_type is None:
+        msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
+        raise TypeError(msg)
+    return dynamodb_type
+
+
+def dynamize_value(val):
+    """
+    Take a scalar Python value and return a dict consisting
+    of the Amazon DynamoDB type specification and the value that
+    needs to be sent to Amazon DynamoDB. If the type of the value
+    is not supported, raise a TypeError.
+    """
+    dynamodb_type = get_dynamodb_type(val)
+    if dynamodb_type == 'N':
+        val = {dynamodb_type: serialize_num(val)}
+    elif dynamodb_type == 'S':
+        val = {dynamodb_type: val}
+    elif dynamodb_type == 'NS':
+        val = {dynamodb_type: list(map(serialize_num, val))}
+    elif dynamodb_type == 'SS':
+        val = {dynamodb_type: [n for n in val]}
+    elif dynamodb_type == 'B':
+        if isinstance(val, bytes):
+            val = Binary(val)
+        val = {dynamodb_type: val.encode()}
+    elif dynamodb_type == 'BS':
+        val = {dynamodb_type: [n.encode() for n in val]}
+    return val
+
+
+if six.PY2:
+    class Binary(object):
+        def __init__(self, value):
+            if not isinstance(value, (bytes, six.text_type)):
+                raise TypeError('Value must be a string of binary data!')
+            if not isinstance(value, bytes):
+                value = value.encode("utf-8")
+
+            self.value = value
+
+        def encode(self):
+            return base64.b64encode(self.value).decode('utf-8')
+
+        def __eq__(self, other):
+            if isinstance(other, Binary):
+                return self.value == other.value
+            else:
+                return self.value == other
+
+        def __ne__(self, other):
+            return not self.__eq__(other)
+
+        def __repr__(self):
+            return 'Binary(%r)' % self.value
+
+        def __str__(self):
+            return self.value
+
+        def __hash__(self):
+            return hash(self.value)
+else:
+    class Binary(bytes):
+        def encode(self):
+            return base64.b64encode(self).decode('utf-8')
+
+        @property
+        def value(self):
+            # This matches the public API of the Python 2 version,
+            # but just returns itself since it is already a bytes
+            # instance.
+            return bytes(self)
+
+        def __repr__(self):
+            return 'Binary(%r)' % self.value
+
+
+def item_object_hook(dct):
+    """
+    A custom object hook for use when decoding JSON item bodies.
+    This hook will transform Amazon DynamoDB JSON responses to something
+    that maps directly to native Python types.
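+
+    Example (a minimal sketch; the item body shown here is
+    hypothetical)::
+
+        import json
+        body = '{"Item": {"id": {"S": "msg-1"}, "views": {"N": "3"}}}'
+        json.loads(body, object_hook=item_object_hook)
+        # {'Item': {'id': 'msg-1', 'views': 3}}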
+ """ + if len(dct.keys()) > 1: + return dct + if 'S' in dct: + return dct['S'] + if 'N' in dct: + return convert_num(dct['N']) + if 'SS' in dct: + return set(dct['SS']) + if 'NS' in dct: + return set(map(convert_num, dct['NS'])) + if 'B' in dct: + return convert_binary(dct['B']) + if 'BS' in dct: + return set(map(convert_binary, dct['BS'])) + return dct + + +class Dynamizer(object): + """Control serialization/deserialization of types. + + This class controls the encoding of python types to the + format that is expected by the DynamoDB API, as well as + taking DynamoDB types and constructing the appropriate + python types. + + If you want to customize this process, you can subclass + this class and override the encoding/decoding of + specific types. For example:: + + 'foo' (Python type) + | + v + encode('foo') + | + v + _encode_s('foo') + | + v + {'S': 'foo'} (Encoding sent to/received from DynamoDB) + | + V + decode({'S': 'foo'}) + | + v + _decode_s({'S': 'foo'}) + | + v + 'foo' (Python type) + + """ + def _get_dynamodb_type(self, attr): + return get_dynamodb_type(attr) + + def encode(self, attr): + """ + Encodes a python type to the format expected + by DynamoDB. + + """ + dynamodb_type = self._get_dynamodb_type(attr) + try: + encoder = getattr(self, '_encode_%s' % dynamodb_type.lower()) + except AttributeError: + raise ValueError("Unable to encode dynamodb type: %s" % + dynamodb_type) + return {dynamodb_type: encoder(attr)} + + def _encode_n(self, attr): + try: + if isinstance(attr, float) and not hasattr(Decimal, 'from_float'): + # python2.6 does not support creating Decimals directly + # from floats so we have to do this ourself. + n = str(float_to_decimal(attr)) + else: + n = str(DYNAMODB_CONTEXT.create_decimal(attr)) + if list(filter(lambda x: x in n, ('Infinity', 'NaN'))): + raise TypeError('Infinity and NaN not supported') + return n + except (TypeError, DecimalException) as e: + msg = '{0} numeric for `{1}`\n{2}'.format( + e.__class__.__name__, attr, str(e) or '') + raise DynamoDBNumberError(msg) + + def _encode_s(self, attr): + if isinstance(attr, bytes): + attr = attr.decode('utf-8') + elif not isinstance(attr, six.text_type): + attr = str(attr) + return attr + + def _encode_ns(self, attr): + return list(map(self._encode_n, attr)) + + def _encode_ss(self, attr): + return [self._encode_s(n) for n in attr] + + def _encode_b(self, attr): + if isinstance(attr, bytes): + attr = Binary(attr) + return attr.encode() + + def _encode_bs(self, attr): + return [self._encode_b(n) for n in attr] + + def _encode_null(self, attr): + return True + + def _encode_bool(self, attr): + return attr + + def _encode_m(self, attr): + return dict([(k, self.encode(v)) for k, v in attr.items()]) + + def _encode_l(self, attr): + return [self.encode(i) for i in attr] + + def decode(self, attr): + """ + Takes the format returned by DynamoDB and constructs + the appropriate python type. + + """ + if len(attr) > 1 or not attr or is_str(attr): + return attr + dynamodb_type = list(attr.keys())[0] + if dynamodb_type.lower() == dynamodb_type: + # It's not an actual type, just a single character attr that + # overlaps with the DDB types. Return it. 
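+            # e.g. decode({'s': 'spam'}) is an item whose only attribute
+            # happens to be named 's'; it is plain data rather than a
+            # DynamoDB type marker, so it is handed back unchanged.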
+ return attr + try: + decoder = getattr(self, '_decode_%s' % dynamodb_type.lower()) + except AttributeError: + return attr + return decoder(attr[dynamodb_type]) + + def _decode_n(self, attr): + return DYNAMODB_CONTEXT.create_decimal(attr) + + def _decode_s(self, attr): + return attr + + def _decode_ns(self, attr): + return set(map(self._decode_n, attr)) + + def _decode_ss(self, attr): + return set(map(self._decode_s, attr)) + + def _decode_b(self, attr): + return convert_binary(attr) + + def _decode_bs(self, attr): + return set(map(self._decode_b, attr)) + + def _decode_null(self, attr): + return None + + def _decode_bool(self, attr): + return attr + + def _decode_m(self, attr): + return dict([(k, self.decode(v)) for k, v in attr.items()]) + + def _decode_l(self, attr): + return [self.decode(i) for i in attr] + + +class NonBooleanDynamizer(Dynamizer): + """Casting boolean type to numeric types. + + This class is provided for backward compatibility. + """ + def _get_dynamodb_type(self, attr): + return get_dynamodb_type(attr, use_boolean=False) + + +class LossyFloatDynamizer(NonBooleanDynamizer): + """Use float/int instead of Decimal for numeric types. + + This class is provided for backwards compatibility. Instead of + using Decimals for the 'N', 'NS' types it uses ints/floats. + + This class is deprecated and its usage is not encouraged, + as doing so may result in loss of precision. Use the + `Dynamizer` class instead. + + """ + def _encode_n(self, attr): + return serialize_num(attr) + + def _encode_ns(self, attr): + return [str(i) for i in attr] + + def _decode_n(self, attr): + return convert_num(attr) + + def _decode_ns(self, attr): + return set(map(self._decode_n, attr)) diff --git a/ext/boto/dynamodb2/__init__.py b/ext/boto/dynamodb2/__init__.py new file mode 100644 index 0000000000..7ac69cec78 --- /dev/null +++ b/ext/boto/dynamodb2/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon DynamoDB service. 
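+
+    Usage sketch (the exact region names returned depend on the
+    endpoints data shipped with boto)::
+
+        >>> import boto.dynamodb2
+        >>> [r.name for r in boto.dynamodb2.regions()]  # doctest: +SKIP
+        [u'us-east-1', u'us-west-1', u'us-west-2', ...]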
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.dynamodb2.layer1 import DynamoDBConnection + return get_regions('dynamodb', connection_cls=DynamoDBConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.dynamodb2.layer1 import DynamoDBConnection + return connect('dynamodb', region_name, connection_cls=DynamoDBConnection, + **kw_params) diff --git a/ext/boto/dynamodb2/exceptions.py b/ext/boto/dynamodb2/exceptions.py new file mode 100644 index 0000000000..3a677e09df --- /dev/null +++ b/ext/boto/dynamodb2/exceptions.py @@ -0,0 +1,78 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ProvisionedThroughputExceededException(JSONResponseError): + pass + + +class LimitExceededException(JSONResponseError): + pass + + +class ConditionalCheckFailedException(JSONResponseError): + pass + + +class ResourceInUseException(JSONResponseError): + pass + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class InternalServerError(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass + + +class ItemCollectionSizeLimitExceededException(JSONResponseError): + pass + + +class DynamoDBError(Exception): + pass + + +class UnknownSchemaFieldError(DynamoDBError): + pass + + +class UnknownIndexFieldError(DynamoDBError): + pass + + +class UnknownFilterTypeError(DynamoDBError): + pass + + +class QueryError(DynamoDBError): + pass + + +class ItemNotFound(DynamoDBError): + pass diff --git a/ext/boto/dynamodb2/fields.py b/ext/boto/dynamodb2/fields.py new file mode 100644 index 0000000000..4443969e4e --- /dev/null +++ b/ext/boto/dynamodb2/fields.py @@ -0,0 +1,337 @@ +from boto.dynamodb2.types import STRING + + +class BaseSchemaField(object): + """ + An abstract class for defining schema fields. + + Contains most of the core functionality for the field. Subclasses must + define an ``attr_type`` to pass to DynamoDB. + """ + attr_type = None + + def __init__(self, name, data_type=STRING): + """ + Creates a Python schema field, to represent the data to pass to + DynamoDB. + + Requires a ``name`` parameter, which should be a string name of the + field. + + Optionally accepts a ``data_type`` parameter, which should be a + constant from ``boto.dynamodb2.types``. 
(Default: ``STRING``) + """ + self.name = name + self.data_type = data_type + + def definition(self): + """ + Returns the attribute definition structure DynamoDB expects. + + Example:: + + >>> field.definition() + { + 'AttributeName': 'username', + 'AttributeType': 'S', + } + + """ + return { + 'AttributeName': self.name, + 'AttributeType': self.data_type, + } + + def schema(self): + """ + Returns the schema structure DynamoDB expects. + + Example:: + + >>> field.schema() + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + } + + """ + return { + 'AttributeName': self.name, + 'KeyType': self.attr_type, + } + + +class HashKey(BaseSchemaField): + """ + An field representing a hash key. + + Example:: + + >>> from boto.dynamodb2.types import NUMBER + >>> HashKey('username') + >>> HashKey('date_joined', data_type=NUMBER) + + """ + attr_type = 'HASH' + + +class RangeKey(BaseSchemaField): + """ + An field representing a range key. + + Example:: + + >>> from boto.dynamodb2.types import NUMBER + >>> HashKey('username') + >>> HashKey('date_joined', data_type=NUMBER) + + """ + attr_type = 'RANGE' + + +class BaseIndexField(object): + """ + An abstract class for defining schema indexes. + + Contains most of the core functionality for the index. Subclasses must + define a ``projection_type`` to pass to DynamoDB. + """ + def __init__(self, name, parts): + self.name = name + self.parts = parts + + def definition(self): + """ + Returns the attribute definition structure DynamoDB expects. + + Example:: + + >>> index.definition() + { + 'AttributeName': 'username', + 'AttributeType': 'S', + } + + """ + definition = [] + + for part in self.parts: + definition.append({ + 'AttributeName': part.name, + 'AttributeType': part.data_type, + }) + + return definition + + def schema(self): + """ + Returns the schema structure DynamoDB expects. + + Example:: + + >>> index.schema() + { + 'IndexName': 'LastNameIndex', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY', + } + } + + """ + key_schema = [] + + for part in self.parts: + key_schema.append(part.schema()) + + return { + 'IndexName': self.name, + 'KeySchema': key_schema, + 'Projection': { + 'ProjectionType': self.projection_type, + } + } + + +class AllIndex(BaseIndexField): + """ + An index signifying all fields should be in the index. + + Example:: + + >>> AllIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ]) + + """ + projection_type = 'ALL' + + +class KeysOnlyIndex(BaseIndexField): + """ + An index signifying only key fields should be in the index. + + Example:: + + >>> KeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ]) + + """ + projection_type = 'KEYS_ONLY' + + +class IncludeIndex(BaseIndexField): + """ + An index signifying only certain fields should be in the index. + + Example:: + + >>> IncludeIndex('GenderIndex', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], includes=['gender']) + + """ + projection_type = 'INCLUDE' + + def __init__(self, *args, **kwargs): + self.includes_fields = kwargs.pop('includes', []) + super(IncludeIndex, self).__init__(*args, **kwargs) + + def schema(self): + schema_data = super(IncludeIndex, self).schema() + schema_data['Projection']['NonKeyAttributes'] = self.includes_fields + return schema_data + + +class GlobalBaseIndexField(BaseIndexField): + """ + An abstract class for defining global indexes. 
+ + Contains most of the core functionality for the index. Subclasses must + define a ``projection_type`` to pass to DynamoDB. + """ + throughput = { + 'read': 5, + 'write': 5, + } + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + + if throughput is not None: + self.throughput = throughput + + super(GlobalBaseIndexField, self).__init__(*args, **kwargs) + + def schema(self): + """ + Returns the schema structure DynamoDB expects. + + Example:: + + >>> index.schema() + { + 'IndexName': 'LastNameIndex', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'KEYS_ONLY', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + } + + """ + schema_data = super(GlobalBaseIndexField, self).schema() + schema_data['ProvisionedThroughput'] = { + 'ReadCapacityUnits': int(self.throughput['read']), + 'WriteCapacityUnits': int(self.throughput['write']), + } + return schema_data + + +class GlobalAllIndex(GlobalBaseIndexField): + """ + An index signifying all fields should be in the index. + + Example:: + + >>> GlobalAllIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'ALL' + + +class GlobalKeysOnlyIndex(GlobalBaseIndexField): + """ + An index signifying only key fields should be in the index. + + Example:: + + >>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'KEYS_ONLY' + + +class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex): + """ + An index signifying only certain fields should be in the index. + + Example:: + + >>> GlobalIncludeIndex('GenderIndex', parts=[ + ... HashKey('username'), + ... RangeKey('date_joined') + ... ], + ... includes=['gender'], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + + """ + projection_type = 'INCLUDE' + + def __init__(self, *args, **kwargs): + throughput = kwargs.pop('throughput', None) + IncludeIndex.__init__(self, *args, **kwargs) + if throughput: + kwargs['throughput'] = throughput + GlobalBaseIndexField.__init__(self, *args, **kwargs) + + def schema(self): + # Pick up the includes. + schema_data = IncludeIndex.schema(self) + # Also the throughput. + schema_data.update(GlobalBaseIndexField.schema(self)) + return schema_data diff --git a/ext/boto/dynamodb2/items.py b/ext/boto/dynamodb2/items.py new file mode 100644 index 0000000000..89dd6681aa --- /dev/null +++ b/ext/boto/dynamodb2/items.py @@ -0,0 +1,473 @@ +from copy import deepcopy + + +class NEWVALUE(object): + # A marker for new data added. + pass + + +class Item(object): + """ + An object representing the item data within a DynamoDB table. + + An item is largely schema-free, meaning it can contain any data. The only + limitation is that it must have data for the fields in the ``Table``'s + schema. + + This object presents a dictionary-like interface for accessing/storing + data. It also tries to intelligently track how data has changed throughout + the life of the instance, to be as efficient as possible about updates. + + Empty items, or items that have no data, are considered falsey. + + """ + def __init__(self, table, data=None, loaded=False): + """ + Constructs an (unsaved) ``Item`` instance. 
+ + To persist the data in DynamoDB, you'll need to call the ``Item.save`` + (or ``Item.partial_save``) on the instance. + + Requires a ``table`` parameter, which should be a ``Table`` instance. + This is required, as DynamoDB's API is focus around all operations + being table-level. It's also for persisting schema around many objects. + + Optionally accepts a ``data`` parameter, which should be a dictionary + of the fields & values of the item. Alternatively, an ``Item`` instance + may be provided from which to extract the data. + + Optionally accepts a ``loaded`` parameter, which should be a boolean. + ``True`` if it was preexisting data loaded from DynamoDB, ``False`` if + it's new data from the user. Default is ``False``. + + Example:: + + >>> users = Table('users') + >>> user = Item(users, data={ + ... 'username': 'johndoe', + ... 'first_name': 'John', + ... 'date_joined': 1248o61592, + ... }) + + # Change existing data. + >>> user['first_name'] = 'Johann' + # Add more data. + >>> user['last_name'] = 'Doe' + # Delete data. + >>> del user['date_joined'] + + # Iterate over all the data. + >>> for field, val in user.items(): + ... print "%s: %s" % (field, val) + username: johndoe + first_name: John + date_joined: 1248o61592 + + """ + self.table = table + self._loaded = loaded + self._orig_data = {} + self._data = data + self._dynamizer = table._dynamizer + + if isinstance(self._data, Item): + self._data = self._data._data + if self._data is None: + self._data = {} + + if self._loaded: + self._orig_data = deepcopy(self._data) + + def __getitem__(self, key): + return self._data.get(key, None) + + def __setitem__(self, key, value): + self._data[key] = value + + def __delitem__(self, key): + if not key in self._data: + return + + del self._data[key] + + def keys(self): + return self._data.keys() + + def values(self): + return self._data.values() + + def items(self): + return self._data.items() + + def get(self, key, default=None): + return self._data.get(key, default) + + def __iter__(self): + for key in self._data: + yield self._data[key] + + def __contains__(self, key): + return key in self._data + + def __bool__(self): + return bool(self._data) + + __nonzero__ = __bool__ + + def _determine_alterations(self): + """ + Checks the ``-orig_data`` against the ``_data`` to determine what + changes to the data are present. + + Returns a dictionary containing the keys ``adds``, ``changes`` & + ``deletes``, containing the updated data. + """ + alterations = { + 'adds': {}, + 'changes': {}, + 'deletes': [], + } + + orig_keys = set(self._orig_data.keys()) + data_keys = set(self._data.keys()) + + # Run through keys we know are in both for changes. + for key in orig_keys.intersection(data_keys): + if self._data[key] != self._orig_data[key]: + if self._is_storable(self._data[key]): + alterations['changes'][key] = self._data[key] + else: + alterations['deletes'].append(key) + + # Run through additions. + for key in data_keys.difference(orig_keys): + if self._is_storable(self._data[key]): + alterations['adds'][key] = self._data[key] + + # Run through deletions. + for key in orig_keys.difference(data_keys): + alterations['deletes'].append(key) + + return alterations + + def needs_save(self, data=None): + """ + Returns whether or not the data has changed on the ``Item``. + + Optionally accepts a ``data`` argument, which accepts the output from + ``self._determine_alterations()`` if you've already called it. Typically + unnecessary to do. Default is ``None``. 
+ + Example: + + >>> user.needs_save() + False + >>> user['first_name'] = 'Johann' + >>> user.needs_save() + True + + """ + if data is None: + data = self._determine_alterations() + + needs_save = False + + for kind in ['adds', 'changes', 'deletes']: + if len(data[kind]): + needs_save = True + break + + return needs_save + + def mark_clean(self): + """ + Marks an ``Item`` instance as no longer needing to be saved. + + Example: + + >>> user.needs_save() + False + >>> user['first_name'] = 'Johann' + >>> user.needs_save() + True + >>> user.mark_clean() + >>> user.needs_save() + False + + """ + self._orig_data = deepcopy(self._data) + + def mark_dirty(self): + """ + DEPRECATED: Marks an ``Item`` instance as needing to be saved. + + This method is no longer necessary, as the state tracking on ``Item`` + has been improved to automatically detect proper state. + """ + return + + def load(self, data): + """ + This is only useful when being handed raw data from DynamoDB directly. + If you have a Python datastructure already, use the ``__init__`` or + manually set the data instead. + + Largely internal, unless you know what you're doing or are trying to + mix the low-level & high-level APIs. + """ + self._data = {} + + for field_name, field_value in data.get('Item', {}).items(): + self[field_name] = self._dynamizer.decode(field_value) + + self._loaded = True + self._orig_data = deepcopy(self._data) + + def get_keys(self): + """ + Returns a Python-style dict of the keys/values. + + Largely internal. + """ + key_fields = self.table.get_key_fields() + key_data = {} + + for key in key_fields: + key_data[key] = self[key] + + return key_data + + def get_raw_keys(self): + """ + Returns a DynamoDB-style dict of the keys/values. + + Largely internal. + """ + raw_key_data = {} + + for key, value in self.get_keys().items(): + raw_key_data[key] = self._dynamizer.encode(value) + + return raw_key_data + + def build_expects(self, fields=None): + """ + Builds up a list of expecations to hand off to DynamoDB on save. + + Largely internal. + """ + expects = {} + + if fields is None: + fields = list(self._data.keys()) + list(self._orig_data.keys()) + + # Only uniques. + fields = set(fields) + + for key in fields: + expects[key] = { + 'Exists': True, + } + value = None + + # Check for invalid keys. + if not key in self._orig_data and not key in self._data: + raise ValueError("Unknown key %s provided." % key) + + # States: + # * New field (only in _data) + # * Unchanged field (in both _data & _orig_data, same data) + # * Modified field (in both _data & _orig_data, different data) + # * Deleted field (only in _orig_data) + orig_value = self._orig_data.get(key, NEWVALUE) + current_value = self._data.get(key, NEWVALUE) + + if orig_value == current_value: + # Existing field unchanged. + value = current_value + else: + if key in self._data: + if not key in self._orig_data: + # New field. + expects[key]['Exists'] = False + else: + # Existing field modified. + value = orig_value + else: + # Existing field deleted. + value = orig_value + + if value is not None: + expects[key]['Value'] = self._dynamizer.encode(value) + + return expects + + def _is_storable(self, value): + # We need to prevent ``None``, empty string & empty set from + # heading to DDB, but allow false-y values like 0 & False make it. + if not value: + if not value in (0, 0.0, False): + return False + + return True + + def prepare_full(self): + """ + Runs through all fields & encodes them to be handed off to DynamoDB + as part of an ``save`` (``put_item``) call. 
+ + Largely internal. + """ + # This doesn't save on its own. Rather, we prepare the datastructure + # and hand-off to the table to handle creation/update. + final_data = {} + + for key, value in self._data.items(): + if not self._is_storable(value): + continue + + final_data[key] = self._dynamizer.encode(value) + + return final_data + + def prepare_partial(self): + """ + Runs through **ONLY** the changed/deleted fields & encodes them to be + handed off to DynamoDB as part of an ``partial_save`` (``update_item``) + call. + + Largely internal. + """ + # This doesn't save on its own. Rather, we prepare the datastructure + # and hand-off to the table to handle creation/update. + final_data = {} + fields = set() + alterations = self._determine_alterations() + + for key, value in alterations['adds'].items(): + final_data[key] = { + 'Action': 'PUT', + 'Value': self._dynamizer.encode(self._data[key]) + } + fields.add(key) + + for key, value in alterations['changes'].items(): + final_data[key] = { + 'Action': 'PUT', + 'Value': self._dynamizer.encode(self._data[key]) + } + fields.add(key) + + for key in alterations['deletes']: + final_data[key] = { + 'Action': 'DELETE', + } + fields.add(key) + + return final_data, fields + + def partial_save(self): + """ + Saves only the changed data to DynamoDB. + + Extremely useful for high-volume/high-write data sets, this allows + you to update only a handful of fields rather than having to push + entire items. This prevents many accidental overwrite situations as + well as saves on the amount of data to transfer over the wire. + + Returns ``True`` on success, ``False`` if no save was performed or + the write failed. + + Example:: + + >>> user['last_name'] = 'Doh!' + # Only the last name field will be sent to DynamoDB. + >>> user.partial_save() + + """ + key = self.get_keys() + # Build a new dict of only the data we're changing. + final_data, fields = self.prepare_partial() + + if not final_data: + return False + + # Remove the key(s) from the ``final_data`` if present. + # They should only be present if this is a new item, in which + # case we shouldn't be sending as part of the data to update. + for fieldname, value in key.items(): + if fieldname in final_data: + del final_data[fieldname] + + try: + # It's likely also in ``fields``, so remove it there too. + fields.remove(fieldname) + except KeyError: + pass + + # Build expectations of only the fields we're planning to update. + expects = self.build_expects(fields=fields) + returned = self.table._update_item(key, final_data, expects=expects) + # Mark the object as clean. + self.mark_clean() + return returned + + def save(self, overwrite=False): + """ + Saves all data to DynamoDB. + + By default, this attempts to ensure that none of the underlying + data has changed. If any fields have changed in between when the + ``Item`` was constructed & when it is saved, this call will fail so + as not to cause any data loss. + + If you're sure possibly overwriting data is acceptable, you can pass + an ``overwrite=True``. If that's not acceptable, you may be able to use + ``Item.partial_save`` to only write the changed field data. + + Optionally accepts an ``overwrite`` parameter, which should be a + boolean. If you provide ``True``, the item will be forcibly overwritten + within DynamoDB, even if another process changed the data in the + meantime. (Default: ``False``) + + Returns ``True`` on success, ``False`` if no save was performed. + + Example:: + + >>> user['last_name'] = 'Doh!' 
+ # All data on the Item is sent to DynamoDB. + >>> user.save() + + # If it fails, you can overwrite. + >>> user.save(overwrite=True) + + """ + if not self.needs_save() and not overwrite: + return False + + final_data = self.prepare_full() + expects = None + + if overwrite is False: + # Build expectations about *all* of the data. + expects = self.build_expects() + + returned = self.table._put_item(final_data, expects=expects) + # Mark the object as clean. + self.mark_clean() + return returned + + def delete(self): + """ + Deletes the item's data to DynamoDB. + + Returns ``True`` on success. + + Example:: + + # Buh-bye now. + >>> user.delete() + + """ + key_data = self.get_keys() + return self.table.delete_item(**key_data) diff --git a/ext/boto/dynamodb2/layer1.py b/ext/boto/dynamodb2/layer1.py new file mode 100644 index 0000000000..bd1eb1ea9d --- /dev/null +++ b/ext/boto/dynamodb2/layer1.py @@ -0,0 +1,2904 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from binascii import crc32 + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.dynamodb2 import exceptions + + +class DynamoDBConnection(AWSQueryConnection): + """ + Amazon DynamoDB + **Overview** + + This is the Amazon DynamoDB API Reference. This guide provides + descriptions and samples of the low-level DynamoDB API. For + information about DynamoDB application development, go to the + `Amazon DynamoDB Developer Guide`_. + + Instead of making the requests to the low-level DynamoDB API + directly from your application, we recommend that you use the AWS + Software Development Kits (SDKs). The easy-to-use libraries in the + AWS SDKs make it unnecessary to call the low-level DynamoDB API + directly from your application. The libraries take care of request + authentication, serialization, and connection management. For more + information, go to `Using the AWS SDKs with DynamoDB`_ in the + Amazon DynamoDB Developer Guide . + + If you decide to code against the low-level DynamoDB API directly, + you will need to write the necessary code to authenticate your + requests. For more information on signing your requests, go to + `Using the DynamoDB API`_ in the Amazon DynamoDB Developer Guide . + + The following are short descriptions of each low-level API action, + organized by function. 
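+
+    A minimal connection sketch (assumes AWS credentials are already
+    available via the environment or boto config; the output shown is
+    illustrative)::
+
+        >>> from boto.dynamodb2.layer1 import DynamoDBConnection
+        >>> conn = DynamoDBConnection()  # doctest: +SKIP
+        >>> conn.list_tables()  # doctest: +SKIP
+        {u'TableNames': [u'users']}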
+ + **Managing Tables** + + + + CreateTable - Creates a table with user-specified provisioned + throughput settings. You must designate one attribute as the hash + primary key for the table; you can optionally designate a second + attribute as the range primary key. DynamoDB creates indexes on + these key attributes for fast data access. Optionally, you can + create one or more secondary indexes, which provide fast data + access using non-key attributes. + + DescribeTable - Returns metadata for a table, such as table + size, status, and index information. + + UpdateTable - Modifies the provisioned throughput settings for a + table. Optionally, you can modify the provisioned throughput + settings for global secondary indexes on the table. + + ListTables - Returns a list of all tables associated with the + current AWS account and endpoint. + + DeleteTable - Deletes a table and all of its indexes. + + + For conceptual information about managing tables, go to `Working + with Tables`_ in the Amazon DynamoDB Developer Guide . + + **Reading Data** + + + + GetItem - Returns a set of attributes for the item that has a + given primary key. By default, GetItem performs an eventually + consistent read; however, applications can specify a strongly + consistent read instead. + + BatchGetItem - Performs multiple GetItem requests for data items + using their primary keys, from one table or multiple tables. The + response from BatchGetItem has a size limit of 16 MB and returns a + maximum of 100 items. Both eventually consistent and strongly + consistent reads can be used. + + Query - Returns one or more items from a table or a secondary + index. You must provide a specific hash key value. You can narrow + the scope of the query using comparison operators against a range + key value, or on the index key. Query supports either eventual or + strong consistency. A single response has a size limit of 1 MB. + + Scan - Reads every item in a table; the result set is eventually + consistent. You can limit the number of items returned by + filtering the data attributes, using conditional expressions. Scan + can be used to enable ad-hoc querying of a table against non-key + attributes; however, since this is a full table scan without using + an index, Scan should not be used for any application query use + case that requires predictable performance. + + + For conceptual information about reading data, go to `Working with + Items`_ and `Query and Scan Operations`_ in the Amazon DynamoDB + Developer Guide . + + **Modifying Data** + + + + PutItem - Creates a new item, or replaces an existing item with + a new item (including all the attributes). By default, if an item + in the table already exists with the same primary key, the new + item completely replaces the existing item. You can use + conditional operators to replace an item only if its attribute + values match certain conditions, or to insert a new item only if + that item doesn't already exist. + + UpdateItem - Modifies the attributes of an existing item. You + can also use conditional operators to perform an update only if + the item's attribute values match certain conditions. + + DeleteItem - Deletes an item in a table by primary key. You can + use conditional operators to perform a delete an item only if the + item's attribute values match certain conditions. + + BatchWriteItem - Performs multiple PutItem and DeleteItem + requests across multiple tables in a single request. 
A failure of + any request(s) in the batch will not cause the entire + BatchWriteItem operation to fail. Supports batches of up to 25 + items to put or delete, with a maximum total request size of 16 + MB. + + + For conceptual information about modifying data, go to `Working + with Items`_ and `Query and Scan Operations`_ in the Amazon + DynamoDB Developer Guide . + """ + APIVersion = "2012-08-10" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com" + ServiceName = "DynamoDB" + TargetPrefix = "DynamoDB_20120810" + ResponseError = JSONResponseError + + _faults = { + "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException, + "LimitExceededException": exceptions.LimitExceededException, + "ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException, + "ResourceInUseException": exceptions.ResourceInUseException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalServerError": exceptions.InternalServerError, + "ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException, + } + + NumberRetries = 10 + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + validate_checksums = kwargs.pop('validate_checksums', True) + if not region: + region_name = boto.config.get('DynamoDB', 'region', + self.DefaultRegionName) + for reg in boto.dynamodb2.regions(): + if reg.name == region_name: + region = reg + break + + # Only set host if it isn't manually overwritten + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(DynamoDBConnection, self).__init__(**kwargs) + self.region = region + self._validate_checksums = boto.config.getbool( + 'DynamoDB', 'validate_checksums', validate_checksums) + self.throughput_exceeded_events = 0 + + def _required_auth_capability(self): + return ['hmac-v4'] + + def batch_get_item(self, request_items, return_consumed_capacity=None): + """ + The BatchGetItem operation returns the attributes of one or + more items from one or more tables. You identify requested + items by primary key. + + A single operation can retrieve up to 16 MB of data, which can + contain as many as 100 items. BatchGetItem will return a + partial result if the response size limit is exceeded, the + table's provisioned throughput is exceeded, or an internal + processing failure occurs. If a partial result is returned, + the operation returns a value for UnprocessedKeys . You can + use this value to retry the operation starting with the next + item to get. + + For example, if you ask to retrieve 100 items, but each + individual item is 300 KB in size, the system returns 52 items + (so as not to exceed the 16 MB limit). It also returns an + appropriate UnprocessedKeys value so you can get the next page + of results. If desired, your application can include its own + logic to assemble the pages of results into one data set. + + If none of the items can be processed due to insufficient + provisioned throughput on all of the tables in the request, + then BatchGetItem will return a + ProvisionedThroughputExceededException . If at least one of + the items is successfully processed, then BatchGetItem + completes successfully, while returning the keys of the unread + items in UnprocessedKeys . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . 
If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide . + + By default, BatchGetItem performs eventually consistent reads + on every table in the request. If you want strongly consistent + reads instead, you can set ConsistentRead to `True` for any or + all tables. + + In order to minimize response latency, BatchGetItem retrieves + items in parallel. + + When designing your application, keep in mind that DynamoDB + does not return attributes in any particular order. To help + parse the response by item, include the primary key values for + the items in your request in the AttributesToGet parameter. + + If a requested item does not exist, it is not returned in the + result. Requests for nonexistent items consume the minimum + read capacity units according to the type of read. For more + information, see `Capacity Units Calculations`_ in the Amazon + DynamoDB Developer Guide . + + :type request_items: map + :param request_items: + A map of one or more table names and, for each table, the corresponding + primary keys for the items to retrieve. Each table name can be + invoked only once. + + Each element in the map consists of the following: + + + + Keys - An array of primary key attribute values that define specific + items in the table. For each primary key, you must provide all of + the key attributes. For example, with a hash type primary key, you + only need to specify the hash attribute. For a hash-and-range type + primary key, you must specify both the hash attribute and the range + attribute. + + AttributesToGet - One or more attributes to be retrieved from the + table. By default, all attributes are returned. If a specified + attribute is not found, it does not appear in the result. Note that + AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + ConsistentRead - If `True`, a strongly consistent read is used; if + `False` (the default), an eventually consistent read is used. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + """ + params = {'RequestItems': request_items, } + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + return self.make_request(action='BatchGetItem', + body=json.dumps(params)) + + def batch_write_item(self, request_items, return_consumed_capacity=None, + return_item_collection_metrics=None): + """ + The BatchWriteItem operation puts or deletes multiple items in + one or more tables. A single call to BatchWriteItem can write + up to 16 MB of data, which can comprise as many as 25 put or + delete requests. Individual items to be written can be as + large as 400 KB. + + + BatchWriteItem cannot update items. To update items, use the + UpdateItem API. 
+ + + The individual PutItem and DeleteItem operations specified in + BatchWriteItem are atomic; however BatchWriteItem as a whole + is not. If any requested operations fail because the table's + provisioned throughput is exceeded or an internal processing + failure occurs, the failed operations are returned in the + UnprocessedItems response parameter. You can investigate and + optionally resend the requests. Typically, you would call + BatchWriteItem in a loop. Each iteration would check for + unprocessed items and submit a new BatchWriteItem request with + those unprocessed items until all items have been processed. + + Note that if none of the items can be processed due to + insufficient provisioned throughput on all of the tables in + the request, then BatchWriteItem will return a + ProvisionedThroughputExceededException . + + If DynamoDB returns any unprocessed items, you should retry + the batch operation on those items. However, we strongly + recommend that you use an exponential backoff algorithm . If + you retry the batch operation immediately, the underlying read + or write requests can still fail due to throttling on the + individual tables. If you delay the batch operation using + exponential backoff, the individual requests in the batch are + much more likely to succeed. + + For more information, go to `Batch Operations and Error + Handling`_ in the Amazon DynamoDB Developer Guide . + + With BatchWriteItem , you can efficiently write or delete + large amounts of data, such as from Amazon Elastic MapReduce + (EMR), or copy data from another database into DynamoDB. In + order to improve performance with these large-scale + operations, BatchWriteItem does not behave in the same way as + individual PutItem and DeleteItem calls would For example, you + cannot specify conditions on individual put and delete + requests, and BatchWriteItem does not return deleted items in + the response. + + If you use a programming language that supports concurrency, + such as Java, you can use threads to write items in parallel. + Your application must include the necessary logic to manage + the threads. With languages that don't support threading, such + as PHP, you must update or delete the specified items one at a + time. In both situations, BatchWriteItem provides an + alternative where the API performs the specified put and + delete operations in parallel, giving you the power of the + thread pool approach without having to introduce complexity + into your application. + + Parallel processing reduces latency, but each specified put + and delete request consumes the same number of write capacity + units whether it is processed in parallel or not. Delete + operations on nonexistent items consume one write capacity + unit. + + If one or more of the following is true, DynamoDB rejects the + entire batch write operation: + + + + One or more tables specified in the BatchWriteItem request + does not exist. + + Primary key attributes specified on an item in the request + do not match those in the corresponding table's primary key + schema. + + You try to perform multiple operations on the same item in + the same BatchWriteItem request. For example, you cannot put + and delete the same item in the same BatchWriteItem request. + + There are more than 25 requests in the batch. + + Any individual item in a batch exceeds 400 KB. + + The total request size exceeds 16 MB. 
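+
+        An illustrative ``request_items`` value (the table and attribute
+        names here are hypothetical)::
+
+            {
+                'users': [
+                    {'PutRequest': {'Item': {'username': {'S': 'johndoe'}}}},
+                    {'DeleteRequest': {'Key': {'username': {'S': 'janedoe'}}}},
+                ]
+            }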
+ + :type request_items: map + :param request_items: + A map of one or more table names and, for each table, a list of + operations to be performed ( DeleteRequest or PutRequest ). Each + element in the map consists of the following: + + + + DeleteRequest - Perform a DeleteItem operation on the specified item. + The item to be deleted is identified by a Key subelement: + + + Key - A map of primary key attribute values that uniquely identify + the ! item. Each entry in this map consists of an attribute name + and an attribute value. For each primary key, you must provide all + of the key attributes. For example, with a hash type primary key, + you only need to specify the hash attribute. For a hash-and-range + type primary key, you must specify both the hash attribute and the + range attribute. + + + PutRequest - Perform a PutItem operation on the specified item. The + item to be put is identified by an Item subelement: + + + Item - A map of attributes and their values. Each entry in this map + consists of an attribute name and an attribute value. Attribute + values must not be null; string and binary type attributes must + have lengths greater than zero; and set type attributes must not be + empty. Requests that contain empty values will be rejected with a + ValidationException exception. If you specify any attributes that + are part of an index key, then the data types for those attributes + must match those of the schema in the table's attribute definition. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type return_item_collection_metrics: string + :param return_item_collection_metrics: A value that if set to `SIZE`, + the response includes statistics about item collections, if any, + that were modified during the operation are returned in the + response. If set to `NONE` (the default), no statistics are + returned. + + """ + params = {'RequestItems': request_items, } + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + return self.make_request(action='BatchWriteItem', + body=json.dumps(params)) + + def create_table(self, attribute_definitions, table_name, key_schema, + provisioned_throughput, local_secondary_indexes=None, + global_secondary_indexes=None): + """ + The CreateTable operation adds a new table to your account. In + an AWS account, table names must be unique within each region. + That is, you can have two tables with same name if you create + the tables in different regions. + + CreateTable is an asynchronous operation. Upon receiving a + CreateTable request, DynamoDB immediately returns a response + with a TableStatus of `CREATING`. After the table is created, + DynamoDB sets the TableStatus to `ACTIVE`. You can perform + read and write operations only on an `ACTIVE` table. + + You can optionally define secondary indexes on the new table, + as part of the CreateTable operation. If you want to create + multiple tables with secondary indexes on them, you must + create the tables sequentially. Only one table with secondary + indexes can be in the `CREATING` state at any given time. 
+ + You can use the DescribeTable API to check the table status. + + :type attribute_definitions: list + :param attribute_definitions: An array of attributes that describe the + key schema for the table and indexes. + + :type table_name: string + :param table_name: The name of the table to create. + + :type key_schema: list + :param key_schema: Specifies the attributes that make up the primary + key for a table or an index. The attributes in KeySchema must also + be defined in the AttributeDefinitions array. For more information, + see `Data Model`_ in the Amazon DynamoDB Developer Guide . + Each KeySchemaElement in the array is composed of: + + + + AttributeName - The name of this key attribute. + + KeyType - Determines whether the key attribute is `HASH` or `RANGE`. + + + For a primary key that consists of a hash attribute, you must specify + exactly one element with a KeyType of `HASH`. + + For a primary key that consists of hash and range attributes, you must + specify exactly two elements, in this order: The first element must + have a KeyType of `HASH`, and the second element must have a + KeyType of `RANGE`. + + For more information, see `Specifying the Primary Key`_ in the Amazon + DynamoDB Developer Guide . + + :type local_secondary_indexes: list + :param local_secondary_indexes: + One or more local secondary indexes (the maximum is five) to be created + on the table. Each index is scoped to a given hash key value. There + is a 10 GB size limit per hash key; otherwise, the size of a local + secondary index is unconstrained. + + Each local secondary index in the array includes the following: + + + + IndexName - The name of the local secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the local secondary index. + The key schema must begin with the same hash key attribute as the + table. + + Projection - Specifies attributes that are copied (projected) from + the table into the index. These are in addition to the primary key + attributes and index key attributes, which are automatically + projected. Each attribute specification is composed of: + + + ProjectionType - One of the following: + + + `KEYS_ONLY` - Only the index and primary keys are projected into the + index. + + `INCLUDE` - Only the specified table attributes are projected into + the index. The list of projected attributes are in NonKeyAttributes + . + + `ALL` - All of the table attributes are projected into the index. + + + NonKeyAttributes - A list of one or more non-key attribute names that + are projected into the secondary index. The total count of + attributes specified in NonKeyAttributes , summed across all of the + secondary indexes, must not exceed 20. If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + :type global_secondary_indexes: list + :param global_secondary_indexes: + One or more global secondary indexes (the maximum is five) to be + created on the table. Each global secondary index in the array + includes the following: + + + + IndexName - The name of the global secondary index. Must be unique + only for this table. + + KeySchema - Specifies the key schema for the global secondary index. + + Projection - Specifies attributes that are copied (projected) from + the table into the index. These are in addition to the primary key + attributes and index key attributes, which are automatically + projected. 
Each attribute specification is composed of: + + + ProjectionType - One of the following: + + + `KEYS_ONLY` - Only the index and primary keys are projected into the + index. + + `INCLUDE` - Only the specified table attributes are projected into + the index. The list of projected attributes are in NonKeyAttributes + . + + `ALL` - All of the table attributes are projected into the index. + + + NonKeyAttributes - A list of one or more non-key attribute names that + are projected into the secondary index. The total count of + attributes specified in NonKeyAttributes , summed across all of the + secondary indexes, must not exceed 20. If you project the same + attribute into two different indexes, this counts as two distinct + attributes when determining the total. + + + ProvisionedThroughput - The provisioned throughput settings for the + global secondary index, consisting of read and write capacity + units. + + :type provisioned_throughput: dict + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. + For current minimum and maximum provisioned throughput values, see + `Limits`_ in the Amazon DynamoDB Developer Guide . + + """ + params = { + 'AttributeDefinitions': attribute_definitions, + 'TableName': table_name, + 'KeySchema': key_schema, + 'ProvisionedThroughput': provisioned_throughput, + } + if local_secondary_indexes is not None: + params['LocalSecondaryIndexes'] = local_secondary_indexes + if global_secondary_indexes is not None: + params['GlobalSecondaryIndexes'] = global_secondary_indexes + return self.make_request(action='CreateTable', + body=json.dumps(params)) + + def delete_item(self, table_name, key, expected=None, + conditional_operator=None, return_values=None, + return_consumed_capacity=None, + return_item_collection_metrics=None, + condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Deletes a single item in a table by primary key. You can + perform a conditional delete operation that deletes the item + if it exists, or if it has an expected attribute value. + + In addition to deleting an item, you can also return the + item's attribute values in the same operation, using the + ReturnValues parameter. + + Unless you specify conditions, the DeleteItem is an idempotent + operation; running it multiple times on the same item or + attribute does not result in an error response. + + Conditional deletes are useful for deleting items only if + specific conditions are met. If those conditions are met, + DynamoDB performs the delete. Otherwise, the item is not + deleted. + + :type table_name: string + :param table_name: The name of the table from which to delete the item. + + :type key: map + :param key: A map of attribute names to AttributeValue objects, + representing the primary key of the item to delete. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + + :type expected: map + :param expected: + There is a newer parameter available. Use ConditionExpression instead. + Note that if you use Expected and ConditionExpression at the same + time, DynamoDB will return a ValidationException exception. + + This parameter does not support lists or maps. + + A map of attribute/condition pairs. 
Expected provides a conditional + block for the DeleteItem operation. + + Each element of Expected consists of an attribute name, a comparison + operator, and one or more values. DynamoDB compares the attribute + with the value(s) you supplied, using the comparison operator. For + each Expected element, the result of the evaluation is either true + or false. + + If you specify more than one element in the Expected map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. (You can use the + ConditionalOperator parameter to OR the conditions instead. If you + do this, then at least one of the conditions must evaluate to true, + rather than all of them.) + + If the Expected map evaluates to true, then the conditional operation + succeeds; otherwise, it fails. + + Expected contains the following: + + + + AttributeValueList - One or more values to evaluate against the + supplied attribute. The number of values in the list depends on the + ComparisonOperator being used. For type Number, value comparisons + are numeric. String value comparisons for greater than, equals, or + less than are based on ASCII character code values. For example, + `a` is greater than `A`, and `a` is greater than `B`. For a list of + code values, see + `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_. + For type Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values, for example when + evaluating query expressions. + + ComparisonOperator - A comparator for evaluating attributes in the + AttributeValueList . When performing the comparison, DynamoDB uses + strongly consistent reads. The following comparison operators are + available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | + CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following + are descriptions of each comparison operator. + + + `EQ` : Equal. `EQ` is supported for all datatypes, including lists + and maps. AttributeValueList can contain only one AttributeValue + element of type String, Number, Binary, String Set, Number Set, or + Binary Set. If an item contains an AttributeValue element of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `NE` : Not equal. `NE` is supported for all datatypes, including + lists and maps. AttributeValueList can contain only one + AttributeValue of type String, Number, Binary, String Set, Number + Set, or Binary Set. If an item contains an AttributeValue of a + different type than the one specified in the request, the value + does not match. For example, `{"S":"6"}` does not equal + `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2", + "1"]}`. >
  • + + `LE` : Less than or equal. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. >
  • + + `LT` : Less than. AttributeValueList can contain only one + AttributeValue of type String, Number, or Binary (not a set type). + If an item contains an AttributeValue element of a different type + than the one specified in the request, the value does not match. + For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GE` : Greater than or equal. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. >
  • + + `GT` : Greater than. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If an item contains an AttributeValue element of a different + type than the one specified in the request, the value does not + match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also, + `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`. >
  • + + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all + datatypes, including lists and maps. This operator tests for the + existence of an attribute, not its data type. If the data type of + attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the + result is a Boolean true . This result is because the attribute " + `a`" exists; its data type is not relevant to the `NOT_NULL` + comparison operator. + + `NULL` : The attribute does not exist. `NULL` is supported for all + datatypes, including lists and maps. This operator tests for the + nonexistence of an attribute, not its data type. If the data type + of attribute " `a`" is null, and you evaluate it using `NULL`, the + result is a Boolean false . This is because the attribute " `a`" + exists; its data type is not relevant to the `NULL` comparison + operator. + + `CONTAINS` : Checks for a subsequence, or value in a set. + AttributeValueList can contain only one AttributeValue element of + type String, Number, or Binary (not a set type). If the target + attribute of the comparison is of type String, then the operator + checks for a substring match. If the target attribute of the + comparison is of type Binary, then the operator looks for a + subsequence of the target that matches the input. If the target + attribute of the comparison is a set (" `SS`", " `NS`", or " + `BS`"), then the operator evaluates to true if it finds an exact + match with any member of the set. CONTAINS is supported for lists: + When evaluating " `a CONTAINS b`", " `a`" can be a list; however, " + `b`" cannot be a set, a map, or a list. + + `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a + value in a set. AttributeValueList can contain only one + AttributeValue element of type String, Number, or Binary (not a set + type). If the target attribute of the comparison is a String, then + the operator checks for the absence of a substring match. If the + target attribute of the comparison is Binary, then the operator + checks for the absence of a subsequence of the target that matches + the input. If the target attribute of the comparison is a set (" + `SS`", " `NS`", or " `BS`"), then the operator evaluates to true if + it does not find an exact match with any member of the set. + NOT_CONTAINS is supported for lists: When evaluating " `a NOT + CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a + set, a map, or a list. + + `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain + only one AttributeValue of type String or Binary (not a Number or a + set type). The target attribute of the comparison must be of type + String or Binary (not a Number or a set type). >
+ + `IN` : Checks for matching elements within two sets.
+ AttributeValueList can contain one or more AttributeValue elements
+ of type String, Number, or Binary (not a set type). These
+ attributes are compared against an existing set type attribute of
+ an item. If any elements of the input set are present in the item
+ attribute, the expression evaluates to true.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set type). A target attribute matches if the target
+ value is greater than, or equal to, the first element and less
+ than, or equal to, the second element. If an item contains an
+ AttributeValue element of a different type than the one specified
+ in the request, the value does not match. For example, `{"S":"6"}`
+ does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+ to `{"NS":["6", "2", "1"]}`.
+
+
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
+ Guide .
+
+ For backward compatibility with previous DynamoDB releases, the
+ following parameters can be used instead of AttributeValueList and
+ ComparisonOperator :
+
+
+ + Value - A value for DynamoDB to compare with an attribute.
+ + Exists - A Boolean value that causes DynamoDB to evaluate the value
+ before attempting the conditional operation:
+
+ + If Exists is `True`, DynamoDB will check to see if that attribute
+ value already exists in the table. If it is found, then the
+ condition evaluates to true; otherwise the condition evaluates to
+ false.
+ + If Exists is `False`, DynamoDB assumes that the attribute value does
+ not exist in the table. If in fact the value does not exist, then
+ the assumption is valid and the condition evaluates to true. If the
+ value is found, despite the assumption that it does not exist, the
+ condition evaluates to false.
+ Note that the default value for Exists is `True`.
+
+
+ The Value and Exists parameters are incompatible with
+ AttributeValueList and ComparisonOperator . Note that if you use
+ both sets of parameters at once, DynamoDB will return a
+ ValidationException exception.
+
+ :type conditional_operator: string
+ :param conditional_operator:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use ConditionalOperator and ConditionExpression at
+ the same time, DynamoDB will return a ValidationException
+ exception.
+
+ This parameter does not support lists or maps.
+
+ A logical operator to apply to the conditions in the Expected map:
+
+
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+
+
+ If you omit ConditionalOperator , then `AND` is the default.
+
+ The operation will succeed only if the entire map evaluates to true.
+
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared before they were deleted. For DeleteItem , the valid
+ values are:
+
+
+ + `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
+ then nothing is returned. (This setting is the default for
+ ReturnValues .)
+ + `ALL_OLD` - The content of the old item is returned.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity: A value that if set to `TOTAL`, the
+ response includes ConsumedCapacity data for tables and indexes. If
+ set to `INDEXES`, the response includes ConsumedCapacity for
+ indexes. If set to `NONE` (the default), ConsumedCapacity is not
+ included in the response.
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: A value that if set to `SIZE`,
+ the response includes statistics about item collections, if any,
+ that were modified during the operation. If set to `NONE` (the
+ default), no statistics are returned.
+
+ :type condition_expression: string
+ :param condition_expression: A condition that must be satisfied in
+ order for a conditional DeleteItem to succeed.
+ An expression can contain any of the following:
+
+
+ + Boolean functions: `attribute_exists | attribute_not_exists |
+ contains | begins_with` These function names are case-sensitive.
+ + Comparison operators: ` = | <> | < | > | <= | >= | BETWEEN | IN`
+ + Logical operators: `AND | OR | NOT`
+
+
+ For more information on condition expressions, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+
+ + To shorten an attribute name that is very long or unwieldy in an
+ expression.
+ + To create a placeholder for repeating occurrences of an attribute
+ name in an expression.
+ + To prevent special characters in an attribute name from being
+ misinterpreted in an expression.
+
+
+ Use the **#** character in an expression to dereference an attribute
+ name. For example, consider the following expression:
+
+
+ + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
+ = "Jones"`
+
+
+ Now suppose that you specified the following for
+ ExpressionAttributeNames :
+
+
+ + `{"#name":"order.customerInfo.LastName"}`
+
+
+ The expression can now be simplified as follows:
+
+
+ + `#name = "Smith" OR #name = "Jones"`
+
+
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+
+ `Available | Backordered | Discontinued`
+
+ You would first need to specify ExpressionAttributeValues as follows:
+
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
+
+ You could then use these values in an expression, such as this:
+
+ `ProductStatus IN (:avail, :back, :disc)`
+
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
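+
+ Example (an illustrative sketch only, not part of the generated AWS
+ documentation; it assumes a layer-1 connection bound to a
+ hypothetical name ``conn`` and a ``users`` table with hash key
+ ``username``)::
+
+     conn.delete_item(
+         table_name='users',
+         key={'username': {'S': 'alice'}},
+         return_values='ALL_OLD')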
+ + """ + params = {'TableName': table_name, 'Key': key, } + if expected is not None: + params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='DeleteItem', + body=json.dumps(params)) + + def delete_table(self, table_name): + """ + The DeleteTable operation deletes a table and all of its + items. After a DeleteTable request, the specified table is in + the `DELETING` state until DynamoDB completes the deletion. If + the table is in the `ACTIVE` state, you can delete it. If a + table is in `CREATING` or `UPDATING` states, then DynamoDB + returns a ResourceInUseException . If the specified table does + not exist, DynamoDB returns a ResourceNotFoundException . If + table is already in the `DELETING` state, no error is + returned. + + + DynamoDB might continue to accept data read and write + operations, such as GetItem and PutItem , on a table in the + `DELETING` state until the table deletion is complete. + + + When you delete a table, any indexes on that table are also + deleted. + + Use the DescribeTable API to check the status of the table. + + :type table_name: string + :param table_name: The name of the table to delete. + + """ + params = {'TableName': table_name, } + return self.make_request(action='DeleteTable', + body=json.dumps(params)) + + def describe_table(self, table_name): + """ + Returns information about the table, including the current + status of the table, when it was created, the primary key + schema, and any indexes on the table. + + + If you issue a DescribeTable request immediately after a + CreateTable request, DynamoDB might return a + ResourceNotFoundException. This is because DescribeTable uses + an eventually consistent query, and the metadata for your + table might not be available at that moment. Wait for a few + seconds, and then try the DescribeTable request again. + + :type table_name: string + :param table_name: The name of the table to describe. + + """ + params = {'TableName': table_name, } + return self.make_request(action='DescribeTable', + body=json.dumps(params)) + + def get_item(self, table_name, key, attributes_to_get=None, + consistent_read=None, return_consumed_capacity=None, + projection_expression=None, expression_attribute_names=None): + """ + The GetItem operation returns a set of attributes for the item + with the given primary key. If there is no matching item, + GetItem does not return any data. + + GetItem provides an eventually consistent read by default. If + your application requires a strongly consistent read, set + ConsistentRead to `True`. Although a strongly consistent read + might take more time than an eventually consistent read, it + always returns the last updated value. + + :type table_name: string + :param table_name: The name of the table containing the requested item. 
+ + :type key: map + :param key: A map of attribute names to AttributeValue objects, + representing the primary key of the item to retrieve. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + :type consistent_read: boolean + :param consistent_read: A value that if set to `True`, then the + operation uses strongly consistent reads; otherwise, eventually + consistent reads are used. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . 
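+
+ Example (an illustrative sketch only, not part of the generated AWS
+ documentation; it assumes a layer-1 connection bound to a
+ hypothetical name ``conn`` and a ``users`` table keyed on
+ ``username``; the GetItem response carries the attributes in an
+ ``Item`` map)::
+
+     response = conn.get_item(
+         table_name='users',
+         key={'username': {'S': 'alice'}},
+         consistent_read=True)
+     item = response.get('Item')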
+ + """ + params = {'TableName': table_name, 'Key': key, } + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if consistent_read is not None: + params['ConsistentRead'] = consistent_read + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + return self.make_request(action='GetItem', + body=json.dumps(params)) + + def list_tables(self, exclusive_start_table_name=None, limit=None): + """ + Returns an array of table names associated with the current + account and endpoint. The output from ListTables is paginated, + with each page returning a maximum of 100 table names. + + :type exclusive_start_table_name: string + :param exclusive_start_table_name: The first table name that this + operation will evaluate. Use the value that was returned for + LastEvaluatedTableName in a previous operation, so that you can + obtain the next page of results. + + :type limit: integer + :param limit: A maximum number of table names to return. If this + parameter is not specified, the limit is 100. + + """ + params = {} + if exclusive_start_table_name is not None: + params['ExclusiveStartTableName'] = exclusive_start_table_name + if limit is not None: + params['Limit'] = limit + return self.make_request(action='ListTables', + body=json.dumps(params)) + + def put_item(self, table_name, item, expected=None, return_values=None, + return_consumed_capacity=None, + return_item_collection_metrics=None, + conditional_operator=None, condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Creates a new item, or replaces an old item with a new item. + If an item that has the same primary key as the new item + already exists in the specified table, the new item completely + replaces the existing item. You can perform a conditional put + operation (add a new item if one with the specified primary + key doesn't exist), or replace an existing item if it has + certain attribute values. + + In addition to putting an item, you can also return the item's + attribute values in the same operation, using the ReturnValues + parameter. + + When you add an item, the primary key attribute(s) are the + only required attributes. Attribute values cannot be null. + String and Binary type attributes must have lengths greater + than zero. Set type attributes cannot be empty. Requests with + empty values will be rejected with a ValidationException + exception. + + You can request that PutItem return either a copy of the + original item (before the update) or a copy of the updated + item (after the update). For more information, see the + ReturnValues description below. + + + To prevent a new item from replacing an existing item, use a + conditional put operation with ComparisonOperator set to + `NULL` for the primary key attribute, or attributes. + + + For more information about using this API, see `Working with + Items`_ in the Amazon DynamoDB Developer Guide . + + :type table_name: string + :param table_name: The name of the table to contain the item. + + :type item: map + :param item: A map of attribute name/value pairs, one for each + attribute. Only the primary key attributes are required; you can + optionally provide other attribute name-value pairs for the item. 
You must provide all of the attributes for the primary key. For
+ example, with a hash type primary key, you only need to specify the
+ hash attribute. For a hash-and-range type primary key, you must
+ specify both the hash attribute and the range attribute.
+
+ If you specify any attributes that are part of an index key, then the
+ data types for those attributes must match those of the schema in
+ the table's attribute definition.
+
+ For more information about primary keys, see `Primary Key`_ in the
+ Amazon DynamoDB Developer Guide .
+
+ Each element in the Item map is an AttributeValue object.
+
+ :type expected: map
+ :param expected:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use Expected and ConditionExpression at the same
+ time, DynamoDB will return a ValidationException exception.
+
+ This parameter does not support lists or maps.
+
+ A map of attribute/condition pairs. Expected provides a conditional
+ block for the PutItem operation.
+
+ Each element of Expected consists of an attribute name, a comparison
+ operator, and one or more values. DynamoDB compares the attribute
+ with the value(s) you supplied, using the comparison operator. For
+ each Expected element, the result of the evaluation is either true
+ or false.
+
+ If you specify more than one element in the Expected map, then by
+ default all of the conditions must evaluate to true. In other
+ words, the conditions are ANDed together. (You can use the
+ ConditionalOperator parameter to OR the conditions instead. If you
+ do this, then at least one of the conditions must evaluate to true,
+ rather than all of them.)
+
+ If the Expected map evaluates to true, then the conditional operation
+ succeeds; otherwise, it fails.
+
+ Expected contains the following:
+
+
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ ComparisonOperator being used. For type Number, value comparisons
+ are numeric. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For type Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator - A comparator for evaluating attributes in the
+ AttributeValueList . When performing the comparison, DynamoDB uses
+ strongly consistent reads. The following comparison operators are
+ available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
+ CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
+ are descriptions of each comparison operator.
+
+ + `EQ` : Equal. `EQ` is supported for all datatypes, including lists
+ and maps. AttributeValueList can contain only one AttributeValue
+ element of type String, Number, Binary, String Set, Number Set, or
+ Binary Set. If an item contains an AttributeValue element of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `NE` : Not equal. `NE` is supported for all datatypes, including
+ lists and maps. AttributeValueList can contain only one
+ AttributeValue of type String, Number, Binary, String Set, Number
+ Set, or Binary Set. If an item contains an AttributeValue of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set type).
+ If an item contains an AttributeValue element of a different type
+ than the one specified in the request, the value does not match.
+ For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
+ datatypes, including lists and maps. This operator tests for the
+ existence of an attribute, not its data type. If the data type of
+ attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
+ result is a Boolean true . This result is because the attribute "
+ `a`" exists; its data type is not relevant to the `NOT_NULL`
+ comparison operator.
+ + `NULL` : The attribute does not exist. `NULL` is supported for all
+ datatypes, including lists and maps. This operator tests for the
+ nonexistence of an attribute, not its data type. If the data type
+ of attribute " `a`" is null, and you evaluate it using `NULL`, the
+ result is a Boolean false . This is because the attribute " `a`"
+ exists; its data type is not relevant to the `NULL` comparison
+ operator.
+ + `CONTAINS` : Checks for a subsequence, or value in a set.
+ AttributeValueList can contain only one AttributeValue element of
+ type String, Number, or Binary (not a set type). If the target
+ attribute of the comparison is of type String, then the operator
+ checks for a substring match. If the target attribute of the
+ comparison is of type Binary, then the operator looks for a
+ subsequence of the target that matches the input. If the target
+ attribute of the comparison is a set (" `SS`", " `NS`", or "
+ `BS`"), then the operator evaluates to true if it finds an exact
+ match with any member of the set. CONTAINS is supported for lists:
+ When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
+ `b`" cannot be a set, a map, or a list.
+ + `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
+ value in a set. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If the target attribute of the comparison is a String, then
+ the operator checks for the absence of a substring match. If the
+ target attribute of the comparison is Binary, then the operator
+ checks for the absence of a subsequence of the target that matches
+ the input. If the target attribute of the comparison is a set ("
+ `SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
+ it does not find an exact match with any member of the set.
+ NOT_CONTAINS is supported for lists: When evaluating " `a NOT
+ CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
+ set, a map, or a list.
+ + `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set type). The target attribute of the comparison must be of type
+ String or Binary (not a Number or a set type).
+ + `IN` : Checks for matching elements within two sets.
+ AttributeValueList can contain one or more AttributeValue elements
+ of type String, Number, or Binary (not a set type). These
+ attributes are compared against an existing set type attribute of
+ an item. If any elements of the input set are present in the item
+ attribute, the expression evaluates to true.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set type). A target attribute matches if the target
+ value is greater than, or equal to, the first element and less
+ than, or equal to, the second element. If an item contains an
+ AttributeValue element of a different type than the one specified
+ in the request, the value does not match. For example, `{"S":"6"}`
+ does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+ to `{"NS":["6", "2", "1"]}`.
+
+
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
+ Guide .
+
+ For backward compatibility with previous DynamoDB releases, the
+ following parameters can be used instead of AttributeValueList and
+ ComparisonOperator :
+
+
+ + Value - A value for DynamoDB to compare with an attribute.
+ + Exists - A Boolean value that causes DynamoDB to evaluate the value
+ before attempting the conditional operation:
+
+ + If Exists is `True`, DynamoDB will check to see if that attribute
+ value already exists in the table. If it is found, then the
+ condition evaluates to true; otherwise the condition evaluates to
+ false.
+ + If Exists is `False`, DynamoDB assumes that the attribute value does
+ not exist in the table. If in fact the value does not exist, then
+ the assumption is valid and the condition evaluates to true. If the
+ value is found, despite the assumption that it does not exist, the
+ condition evaluates to false.
+ Note that the default value for Exists is `True`.
+
+
+ The Value and Exists parameters are incompatible with
+ AttributeValueList and ComparisonOperator . Note that if you use
+ both sets of parameters at once, DynamoDB will return a
+ ValidationException exception.
+
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared before they were updated with the PutItem request. For
+ PutItem , the valid values are:
+
+
+ + `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
+ then nothing is returned. (This setting is the default for
+ ReturnValues .)
+ + `ALL_OLD` - If PutItem overwrote an attribute name-value pair, then
+ the content of the old item is returned.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity: A value that if set to `TOTAL`, the
+ response includes ConsumedCapacity data for tables and indexes. If
+ set to `INDEXES`, the response includes ConsumedCapacity for
+ indexes. If set to `NONE` (the default), ConsumedCapacity is not
+ included in the response.
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: A value that if set to `SIZE`,
+ the response includes statistics about item collections, if any,
+ that were modified during the operation. If set to `NONE` (the
+ default), no statistics are returned.
+
+ :type conditional_operator: string
+ :param conditional_operator:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use ConditionalOperator and ConditionExpression at
+ the same time, DynamoDB will return a ValidationException
+ exception.
+
+ This parameter does not support lists or maps.
+
+ A logical operator to apply to the conditions in the Expected map:
+
+
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+
+
+ If you omit ConditionalOperator , then `AND` is the default.
+
+ The operation will succeed only if the entire map evaluates to true.
+
+ :type condition_expression: string
+ :param condition_expression: A condition that must be satisfied in
+ order for a conditional PutItem operation to succeed.
+ An expression can contain any of the following:
+
+
+ + Boolean functions: `attribute_exists | attribute_not_exists |
+ contains | begins_with` These function names are case-sensitive.
+ + Comparison operators: ` = | <> | < | > | <= | >= | BETWEEN | IN`
+ + Logical operators: `AND | OR | NOT`
+
+
+ For more information on condition expressions, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_names: map
+ :param expression_attribute_names: One or more substitution tokens for
+ simplifying complex expressions. The following are some use cases
+ for using ExpressionAttributeNames :
+
+ + To shorten an attribute name that is very long or unwieldy in an
+ expression.
+ + To create a placeholder for repeating occurrences of an attribute
+ name in an expression.
+ + To prevent special characters in an attribute name from being
+ misinterpreted in an expression.
+
+
+ Use the **#** character in an expression to dereference an attribute
+ name. For example, consider the following expression:
+
+
+ + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName
+ = "Jones"`
+
+
+ Now suppose that you specified the following for
+ ExpressionAttributeNames :
+
+
+ + `{"#name":"order.customerInfo.LastName"}`
+
+
+ The expression can now be simplified as follows:
+
+
+ + `#name = "Smith" OR #name = "Jones"`
+
+
+ For more information on expression attribute names, go to `Accessing
+ Item Attributes`_ in the Amazon DynamoDB Developer Guide .
+
+ :type expression_attribute_values: map
+ :param expression_attribute_values: One or more values that can be
+ substituted in an expression.
+ Use the **:** (colon) character in an expression to dereference an
+ attribute value. For example, suppose that you wanted to check
+ whether the value of the ProductStatus attribute was one of the
+ following:
+
+ `Available | Backordered | Discontinued`
+
+ You would first need to specify ExpressionAttributeValues as follows:
+
+ `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ ":disc":{"S":"Discontinued"} }`
+
+ You could then use these values in an expression, such as this:
+
+ `ProductStatus IN (:avail, :back, :disc)`
+
+ For more information on expression attribute values, go to `Specifying
+ Conditions`_ in the Amazon DynamoDB Developer Guide .
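+
+ Example (an illustrative sketch only, not part of the generated AWS
+ documentation; it assumes a layer-1 connection bound to a
+ hypothetical name ``conn``; the condition expression keeps an
+ existing item from being silently overwritten)::
+
+     conn.put_item(
+         table_name='users',
+         item={'username': {'S': 'alice'},
+               'status': {'S': 'Available'}},
+         condition_expression='attribute_not_exists(username)')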
+ + """ + params = {'TableName': table_name, 'Item': item, } + if expected is not None: + params['Expected'] = expected + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='PutItem', + body=json.dumps(params)) + + def query(self, table_name, key_conditions, index_name=None, select=None, + attributes_to_get=None, limit=None, consistent_read=None, + query_filter=None, conditional_operator=None, + scan_index_forward=None, exclusive_start_key=None, + return_consumed_capacity=None, projection_expression=None, + filter_expression=None, expression_attribute_names=None, + expression_attribute_values=None): + """ + A Query operation directly accesses items from a table using + the table primary key, or from an index using the index key. + You must provide a specific hash key value. You can narrow the + scope of the query by using comparison operators on the range + key value, or on the index key. You can use the + ScanIndexForward parameter to get results in forward or + reverse order, by range key or by index key. + + Queries that do not return results consume the minimum number + of read capacity units for that type of read operation. + + If the total number of items meeting the query criteria + exceeds the result set size limit of 1 MB, the query stops and + results are returned to the user with LastEvaluatedKey to + continue the query in a subsequent operation. Unlike a Scan + operation, a Query operation never returns both an empty + result set and a LastEvaluatedKey . The LastEvaluatedKey is + only provided if the results exceed 1 MB, or if you have used + Limit . + + You can query a table, a local secondary index, or a global + secondary index. For a query on a table or on a local + secondary index, you can set ConsistentRead to true and obtain + a strongly consistent result. Global secondary indexes support + eventually consistent reads only, so do not specify + ConsistentRead when querying a global secondary index. + + :type table_name: string + :param table_name: The name of the table containing the requested + items. + + :type index_name: string + :param index_name: The name of an index to query. This index can be any + local secondary index or global secondary index on the table. + + :type select: string + :param select: The attributes to be returned in the result. You can + retrieve all item attributes, specific item attributes, the count + of matching items, or in the case of an index, some or all of the + attributes projected into the index. + + + `ALL_ATTRIBUTES` - Returns all of the item attributes from the + specified table or index. If you query a local secondary index, + then for each matching item in the index DynamoDB will fetch the + entire item from the parent table. 
If the index is configured to + project all item attributes, then all of the data can be obtained + from the local secondary index, and no fetching is required. + + `ALL_PROJECTED_ATTRIBUTES` - Allowed only when querying an index. + Retrieves all attributes that have been projected into the index. + If the index is configured to project all attributes, this return + value is equivalent to specifying `ALL_ATTRIBUTES`. + + `COUNT` - Returns the number of matching items, rather than the + matching items themselves. + + `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in + AttributesToGet . This return value is equivalent to specifying + AttributesToGet without specifying any value for Select . If you + query a local secondary index and request only attributes that are + projected into that index, the operation will read only the index + and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. If you query a + global secondary index, you can only request attributes that are + projected into the index. Global secondary index queries cannot + fetch attributes from the parent table. + + + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES` when accessing a table, and + `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use + both Select and AttributesToGet together in a single request, + unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage + is equivalent to specifying AttributesToGet without any value for + Select .) + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + You cannot use both AttributesToGet and Select together in a Query + request, unless the value for Select is `SPECIFIC_ATTRIBUTES`. + (This usage is equivalent to specifying AttributesToGet without any + value for Select .) + + If you query a local secondary index and request only attributes that + are projected into that index, the operation will read only the + index and not the table. If any of the requested attributes are not + projected into the local secondary index, DynamoDB will fetch each + of these attributes from the parent table. This extra fetching + incurs additional throughput cost and latency. + + If you query a global secondary index, you can only request attributes + that are projected into the index. Global secondary index queries + cannot fetch attributes from the parent table. + + :type limit: integer + :param limit: The maximum number of items to evaluate (not necessarily + the number of matching items). 
If DynamoDB processes the number of
+ items up to the limit while processing the results, it stops the
+ operation and returns the matching values up to that point, and a
+ key in LastEvaluatedKey to apply in a subsequent operation, so that
+ you can pick up where you left off. Also, if the processed data set
+ size exceeds 1 MB before DynamoDB reaches this limit, it stops the
+ operation and returns the matching values up to the limit, and a
+ key in LastEvaluatedKey to apply in a subsequent operation to
+ continue the operation. For more information, see `Query and Scan`_
+ in the Amazon DynamoDB Developer Guide .
+
+ :type consistent_read: boolean
+ :param consistent_read: A value that if set to `True`, then the
+ operation uses strongly consistent reads; otherwise, eventually
+ consistent reads are used.
+ Strongly consistent reads are not supported on global secondary
+ indexes. If you query a global secondary index with ConsistentRead
+ set to `True`, you will receive an error message.
+
+ :type key_conditions: map
+ :param key_conditions: The selection criteria for the query. For a
+ query on a table, you can have conditions only on the table primary
+ key attributes. You must specify the hash key attribute name and
+ value as an `EQ` condition. You can optionally specify a second
+ condition, referring to the range key attribute. If you do not
+ specify a range key condition, all items under the hash key will be
+ fetched and processed. Any filters will be applied after this.
+ For a query on an index, you can have conditions only on the index key
+ attributes. You must specify the index hash attribute name and
+ value as an EQ condition. You can optionally specify a second
+ condition, referring to the index key range attribute.
+
+ Each KeyConditions element consists of an attribute name to compare,
+ along with the following:
+
+
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ ComparisonOperator being used. For type Number, value comparisons
+ are numeric. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator - A comparator for evaluating attributes, for
+ example, equals, greater than, less than, and so on. For
+ KeyConditions , only the following comparison operators are
+ supported: `EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN` The
+ following are descriptions of these comparison operators.
+
+ + `EQ` : Equal. AttributeValueList can contain only one AttributeValue
+ of type String, Number, or Binary (not a set type). If an item
+ contains an AttributeValue element of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ equal `{"NS":["6", "2", "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match.
For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set type).
+ If an item contains an AttributeValue element of a different type
+ than the one specified in the request, the value does not match.
+ For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set type). The target attribute of the comparison must be of type
+ String or Binary (not a Number or a set type).
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set type). A target attribute matches if the target
+ value is greater than, or equal to, the first element and less
+ than, or equal to, the second element. If an item contains an
+ AttributeValue element of a different type than the one specified
+ in the request, the value does not match. For example, `{"S":"6"}`
+ does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+ to `{"NS":["6", "2", "1"]}`.
+
+
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
+ Guide .
+
+ :type query_filter: map
+ :param query_filter:
+ There is a newer parameter available. Use FilterExpression instead.
+ Note that if you use QueryFilter and FilterExpression at the same
+ time, DynamoDB will return a ValidationException exception.
+
+ This parameter does not support lists or maps.
+
+ A condition that evaluates the query results after the items are read
+ and returns only the desired values.
+ Query filters are applied after the items are read, so they do not
+ limit the capacity used.
+ If you specify more than one condition in the QueryFilter map, then by
+ default all of the conditions must evaluate to true. In other
+ words, the conditions are ANDed together. (You can use the
+ ConditionalOperator parameter to OR the conditions instead. If you
+ do this, then at least one of the conditions must evaluate to true,
+ rather than all of them.)
+
+
+ QueryFilter does not allow key attributes. You cannot define a filter
+ condition on a hash key or range key.
+
+
+ Each QueryFilter element consists of an attribute name to compare,
+ along with the following:
+
+
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ operator specified in ComparisonOperator . For type Number, value
+ comparisons are numeric. String value comparisons for greater than,
+ equals, or less than are based on ASCII character code values. For
+ example, `a` is greater than `A`, and `a` is greater than `B`. For
+ a list of code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For type Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions. For information on specifying data
+ types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
+ Developer Guide .
+ + ComparisonOperator - A comparator for evaluating attributes. For
+ example, equals, greater than, less than, etc. The following
+ comparison operators are available: `EQ | NE | LE | LT | GE | GT |
+ NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
+ BETWEEN` For complete descriptions of all comparison operators, see
+ `API_Condition.html`_.
+
+ :type conditional_operator: string
+ :param conditional_operator:
+ This parameter does not support lists or maps.
+
+ A logical operator to apply to the conditions in the QueryFilter map:
+
+
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+
+
+ If you omit ConditionalOperator , then `AND` is the default.
+ + The operation will succeed only if the entire map evaluates to true. + + :type scan_index_forward: boolean + :param scan_index_forward: A value that specifies ascending (true) or + descending (false) traversal of the index. DynamoDB returns results + reflecting the requested order determined by the range key. If the + data type is Number, the results are returned in numeric order. For + type String, the results are returned in order of ASCII character + code values. For type Binary, DynamoDB treats each byte of the + binary data as unsigned when it compares binary values. + If ScanIndexForward is not specified, the results are returned in + ascending order. + + :type exclusive_start_key: map + :param exclusive_start_key: The primary key of the first item that this + operation will evaluate. Use the value that was returned for + LastEvaluatedKey in the previous operation. + The data type for ExclusiveStartKey must be String, Number or Binary. + No set data types are allowed. + + :type return_consumed_capacity: string + :param return_consumed_capacity: A value that if set to `TOTAL`, the + response includes ConsumedCapacity data for tables and indexes. If + set to `INDEXES`, the response includes ConsumedCapacity for + indexes. If set to `NONE` (the default), ConsumedCapacity is not + included in the response. + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type filter_expression: string + :param filter_expression: A condition that evaluates the query results + after the items are read and returns only the desired values. + The condition you specify is applied to the items queried; any items + that do not match the expression are not returned. + Filter expressions are applied after the items are read, so they do not + limit the capacity used. + A FilterExpression has the same syntax as a ConditionExpression . For + more information on expression syntax, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. 
For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + """ + params = { + 'TableName': table_name, + 'KeyConditions': key_conditions, + } + if index_name is not None: + params['IndexName'] = index_name + if select is not None: + params['Select'] = select + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if limit is not None: + params['Limit'] = limit + if consistent_read is not None: + params['ConsistentRead'] = consistent_read + if query_filter is not None: + params['QueryFilter'] = query_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if scan_index_forward is not None: + params['ScanIndexForward'] = scan_index_forward + if exclusive_start_key is not None: + params['ExclusiveStartKey'] = exclusive_start_key + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if filter_expression is not None: + params['FilterExpression'] = filter_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='Query', + body=json.dumps(params)) + + def scan(self, table_name, attributes_to_get=None, limit=None, + select=None, scan_filter=None, conditional_operator=None, + exclusive_start_key=None, return_consumed_capacity=None, + total_segments=None, segment=None, projection_expression=None, + filter_expression=None, expression_attribute_names=None, + expression_attribute_values=None): + """ + The Scan operation returns one or more items and item + attributes by accessing every item in the table. To have + DynamoDB return fewer items, you can provide a ScanFilter + operation. + + If the total number of scanned items exceeds the maximum data + set size limit of 1 MB, the scan stops and results are + returned to the user as a LastEvaluatedKey value to continue + the scan in a subsequent operation. The results also include + the number of items exceeding the limit. 
A scan can result in + no table data meeting the filter criteria. + + The result set is eventually consistent. + + By default, Scan operations proceed sequentially; however, for + faster performance on large tables, applications can request a + parallel Scan operation by specifying the Segment and + TotalSegments parameters. For more information, see `Parallel + Scan`_ in the Amazon DynamoDB Developer Guide . + + :type table_name: string + :param table_name: The name of the table containing the requested + items. + + :type attributes_to_get: list + :param attributes_to_get: + There is a newer parameter available. Use ProjectionExpression instead. + Note that if you use AttributesToGet and ProjectionExpression at + the same time, DynamoDB will return a ValidationException + exception. + + This parameter allows you to retrieve lists or maps; however, it cannot + retrieve individual list or map elements. + + The names of one or more attributes to retrieve. If no attribute names + are specified, then all attributes will be returned. If any of the + requested attributes are not found, they will not appear in the + result. + + Note that AttributesToGet has no effect on provisioned throughput + consumption. DynamoDB determines capacity units consumed based on + item size, not on the amount of data that is returned to an + application. + + :type limit: integer + :param limit: The maximum number of items to evaluate (not necessarily + the number of matching items). If DynamoDB processes the number of + items up to the limit while processing the results, it stops the + operation and returns the matching values up to that point, and a + key in LastEvaluatedKey to apply in a subsequent operation, so that + you can pick up where you left off. Also, if the processed data set + size exceeds 1 MB before DynamoDB reaches this limit, it stops the + operation and returns the matching values up to the limit, and a + key in LastEvaluatedKey to apply in a subsequent operation to + continue the operation. For more information, see `Query and Scan`_ + in the Amazon DynamoDB Developer Guide . + + :type select: string + :param select: The attributes to be returned in the result. You can + retrieve all item attributes, specific item attributes, or the + count of matching items. + + + `ALL_ATTRIBUTES` - Returns all of the item attributes. + + `COUNT` - Returns the number of matching items, rather than the + matching items themselves. + + `SPECIFIC_ATTRIBUTES` - Returns only the attributes listed in + AttributesToGet . This return value is equivalent to specifying + AttributesToGet without specifying any value for Select . + + + If neither Select nor AttributesToGet are specified, DynamoDB defaults + to `ALL_ATTRIBUTES`. You cannot use both AttributesToGet and Select + together in a single request, unless the value for Select is + `SPECIFIC_ATTRIBUTES`. (This usage is equivalent to specifying + AttributesToGet without any value for Select .) + + :type scan_filter: map + :param scan_filter: + There is a newer parameter available. Use FilterExpression instead. + Note that if you use ScanFilter and FilterExpression at the same + time, DynamoDB will return a ValidationException exception. + + This parameter does not support lists or maps. + + A condition that evaluates the scan results and returns only the + desired values. + + If you specify more than one condition in the ScanFilter map, then by + default all of the conditions must evaluate to true. In other + words, the conditions are ANDed together. 
(You can use the
+ ConditionalOperator parameter to OR the conditions instead. If you
+ do this, then at least one of the conditions must evaluate to true,
+ rather than all of them.)
+ 
+ Each ScanFilter element consists of an attribute name to compare, along
+ with the following:
+ 
+ 
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ operator specified in ComparisonOperator . For type Number, value
+ comparisons are numeric. String value comparisons for greater than,
+ equals, or less than are based on ASCII character code values. For
+ example, `a` is greater than `A`, and `a` is greater than `B`. For
+ a list of code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions. For information on specifying data
+ types in JSON, see `JSON Data Format`_ in the Amazon DynamoDB
+ Developer Guide .
+ + ComparisonOperator - A comparator for evaluating attributes. For
+ example, equals, greater than, less than, etc. The following
+ comparison operators are available: `EQ | NE | LE | LT | GE | GT |
+ NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN |
+ BETWEEN` For complete descriptions of all comparison operators, see
+ `Condition`_.
+ 
+ :type conditional_operator: string
+ :param conditional_operator:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use ConditionalOperator and ConditionExpression at
+ the same time, DynamoDB will return a ValidationException
+ exception.
+ 
+ This parameter does not support lists or maps.
+ 
+ A logical operator to apply to the conditions in the ScanFilter map:
+ 
+ 
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+ 
+ 
+ If you omit ConditionalOperator , then `AND` is the default.
+ 
+ The operation will succeed only if the entire map evaluates to true.
+ 
+ :type exclusive_start_key: map
+ :param exclusive_start_key: The primary key of the first item that this
+ operation will evaluate. Use the value that was returned for
+ LastEvaluatedKey in the previous operation.
+ The data type for ExclusiveStartKey must be String, Number or Binary.
+ No set data types are allowed.
+ 
+ In a parallel scan, a Scan request that includes ExclusiveStartKey must
+ specify the same segment whose previous Scan returned the
+ corresponding value of LastEvaluatedKey .
+ 
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity: A value that if set to `TOTAL`, the
+ response includes ConsumedCapacity data for tables and indexes. If
+ set to `INDEXES`, the response includes ConsumedCapacity for
+ indexes. If set to `NONE` (the default), ConsumedCapacity is not
+ included in the response.
+ 
+ :type total_segments: integer
+ :param total_segments: For a parallel Scan request, TotalSegments
+ represents the total number of segments into which the Scan
+ operation will be divided. The value of TotalSegments corresponds
+ to the number of application workers that will perform the parallel
+ scan. For example, if you want to scan a table using four
+ application threads, specify a TotalSegments value of 4.
+ The value for TotalSegments must be greater than or equal to 1, and
+ less than or equal to 1000000.
If you specify a TotalSegments value + of 1, the Scan operation will be sequential rather than parallel. + + If you specify TotalSegments , you must also specify Segment . + + :type segment: integer + :param segment: For a parallel Scan request, Segment identifies an + individual segment to be scanned by an application worker. + Segment IDs are zero-based, so the first segment is always 0. For + example, if you want to scan a table using four application + threads, the first thread specifies a Segment value of 0, the + second thread specifies 1, and so on. + + The value of LastEvaluatedKey returned from a parallel Scan request + must be used as ExclusiveStartKey with the same segment ID in a + subsequent Scan operation. + + The value for Segment must be greater than or equal to 0, and less than + the value provided for TotalSegments . + + If you specify Segment , you must also specify TotalSegments . + + :type projection_expression: string + :param projection_expression: A string that identifies one or more + attributes to retrieve from the table. These attributes can include + scalars, sets, or elements of a JSON document. The attributes in + the expression must be separated by commas. + If no attribute names are specified, then all attributes will be + returned. If any of the requested attributes are not found, they + will not appear in the result. + + For more information on projection expressions, go to `Accessing Item + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type filter_expression: string + :param filter_expression: A condition that evaluates the scan results + and returns only the desired values. + The condition you specify is applied to the items scanned; any items + that do not match the expression are not returned. + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. 
For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + """ + params = {'TableName': table_name, } + if attributes_to_get is not None: + params['AttributesToGet'] = attributes_to_get + if limit is not None: + params['Limit'] = limit + if select is not None: + params['Select'] = select + if scan_filter is not None: + params['ScanFilter'] = scan_filter + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if exclusive_start_key is not None: + params['ExclusiveStartKey'] = exclusive_start_key + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if total_segments is not None: + params['TotalSegments'] = total_segments + if segment is not None: + params['Segment'] = segment + if projection_expression is not None: + params['ProjectionExpression'] = projection_expression + if filter_expression is not None: + params['FilterExpression'] = filter_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='Scan', + body=json.dumps(params)) + + def update_item(self, table_name, key, attribute_updates=None, + expected=None, conditional_operator=None, + return_values=None, return_consumed_capacity=None, + return_item_collection_metrics=None, + update_expression=None, condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None): + """ + Edits an existing item's attributes, or adds a new item to the + table if it does not already exist. You can put, delete, or + add attribute values. You can also perform a conditional + update (insert a new attribute name-value pair if it doesn't + exist, or replace an existing name-value pair if it has + certain expected attribute values). + + You can also return the item's attribute values in the same + UpdateItem operation using the ReturnValues parameter. + + :type table_name: string + :param table_name: The name of the table containing the item to update. + + :type key: map + :param key: The primary key of the item to be updated. Each element + consists of an attribute name and a value for that attribute. + For the primary key, you must provide all of the attributes. For + example, with a hash type primary key, you only need to specify the + hash attribute. For a hash-and-range type primary key, you must + specify both the hash attribute and the range attribute. + + :type attribute_updates: map + :param attribute_updates: + There is a newer parameter available. Use UpdateExpression instead. + Note that if you use AttributeUpdates and UpdateExpression at the + same time, DynamoDB will return a ValidationException exception. + + This parameter can be used for modifying top-level attributes; however, + it does not support individual list or map elements. 
+ + The names of attributes to be modified, the action to perform on each, + and the new value for each. If you are updating an attribute that + is an index key attribute for any indexes on that table, the + attribute type must match the index key type defined in the + AttributesDefinition of the table description. You can use + UpdateItem to update any nonkey attributes. + + Attribute values cannot be null. String and Binary type attributes must + have lengths greater than zero. Set type attributes must not be + empty. Requests with empty values will be rejected with a + ValidationException exception. + + Each AttributeUpdates element consists of an attribute name to modify, + along with the following: + + + + Value - The new value, if applicable, for this attribute. + + Action - A value that specifies how to perform the update. This + action is only valid for an existing attribute whose data type is + Number or is a set; do not use `ADD` for other data types. If an + item with the specified primary key is found in the table, the + following values perform the following actions: + + + `PUT` - Adds the specified attribute to the item. If the attribute + already exists, it is replaced by the new value. + + `DELETE` - Removes the attribute and its value, if no value is + specified for `DELETE`. The data type of the specified value must + match the existing value's data type. If a set of values is + specified, then those values are subtracted from the old set. For + example, if the attribute value was the set `[a,b,c]` and the + `DELETE` action specifies `[a,c]`, then the final attribute value + is `[b]`. Specifying an empty set is an error. + + `ADD` - Adds the specified value to the item, if the attribute does + not already exist. If the attribute does exist, then the behavior + of `ADD` depends on the data type of the attribute: + + + If the existing attribute is a number, and if Value is also a number, + then Value is mathematically added to the existing attribute. If + Value is a negative number, then it is subtracted from the existing + attribute. If you use `ADD` to increment or decrement a number + value for an item that doesn't exist before the update, DynamoDB + uses 0 as the initial value. Similarly, if you use `ADD` for an + existing item to increment or decrement an attribute value that + doesn't exist before the update, DynamoDB uses `0` as the initial + value. For example, suppose that the item you want to update + doesn't have an attribute named itemcount , but you decide to `ADD` + the number `3` to this attribute anyway. DynamoDB will create the + itemcount attribute, set its initial value to `0`, and finally add + `3` to it. The result will be a new itemcount attribute, with a + value of `3`. + + If the existing data type is a set, and if Value is also a set, then + Value is appended to the existing set. For example, if the + attribute value is the set `[1,2]`, and the `ADD` action specified + `[3]`, then the final attribute value is `[1,2,3]`. An error occurs + if an `ADD` action is specified for a set attribute and the + attribute type specified does not match the existing set type. Both + sets must have the same primitive data type. For example, if the + existing data type is a set of strings, Value must also be a set of + strings. + + If no item with the specified key is found in the table, the following + values perform the following actions: + + + `PUT` - Causes DynamoDB to create a new item with the specified + primary key, and then adds the attribute. 
+ + `DELETE` - Nothing happens, because attributes cannot be deleted from
+ a nonexistent item. The operation succeeds, but DynamoDB does not
+ create a new item.
+ + `ADD` - Causes DynamoDB to create an item with the supplied primary
+ key and number (or set of numbers) for the attribute value. The
+ only data types allowed are Number and Number Set.
+ 
+ 
+ 
+ If you specify any attributes that are part of an index key, then the
+ data types for those attributes must match those of the schema in
+ the table's attribute definition.
+ 
+ :type expected: map
+ :param expected:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use Expected and ConditionExpression at the same
+ time, DynamoDB will return a ValidationException exception.
+ 
+ This parameter does not support lists or maps.
+ 
+ A map of attribute/condition pairs. Expected provides a conditional
+ block for the UpdateItem operation.
+ 
+ Each element of Expected consists of an attribute name, a comparison
+ operator, and one or more values. DynamoDB compares the attribute
+ with the value(s) you supplied, using the comparison operator. For
+ each Expected element, the result of the evaluation is either true
+ or false.
+ 
+ If you specify more than one element in the Expected map, then by
+ default all of the conditions must evaluate to true. In other
+ words, the conditions are ANDed together. (You can use the
+ ConditionalOperator parameter to OR the conditions instead. If you
+ do this, then at least one of the conditions must evaluate to true,
+ rather than all of them.)
+ 
+ If the Expected map evaluates to true, then the conditional operation
+ succeeds; otherwise, it fails.
+ 
+ Expected contains the following:
+ 
+ 
+ + AttributeValueList - One or more values to evaluate against the
+ supplied attribute. The number of values in the list depends on the
+ ComparisonOperator being used. For type Number, value comparisons
+ are numeric. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `a` is greater than `B`. For a list of
+ code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For type Binary, DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator - A comparator for evaluating attributes in the
+ AttributeValueList . When performing the comparison, DynamoDB uses
+ strongly consistent reads. The following comparison operators are
+ available: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL |
+ CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` The following
+ are descriptions of each comparison operator.
+ 
+ + `EQ` : Equal. `EQ` is supported for all datatypes, including lists
+ and maps. AttributeValueList can contain only one AttributeValue
+ element of type String, Number, Binary, String Set, Number Set, or
+ Binary Set. If an item contains an AttributeValue element of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `NE` : Not equal. `NE` is supported for all datatypes, including
+ lists and maps. AttributeValueList can contain only one
+ AttributeValue of type String, Number, Binary, String Set, Number
+ Set, or Binary Set. If an item contains an AttributeValue of a
+ different type than the one specified in the request, the value
+ does not match. For example, `{"S":"6"}` does not equal
+ `{"N":"6"}`. Also, `{"N":"6"}` does not equal `{"NS":["6", "2",
+ "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set type).
+ If an item contains an AttributeValue element of a different type
+ than the one specified in the request, the value does not match.
+ For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If an item contains an AttributeValue element of a different
+ type than the one specified in the request, the value does not
+ match. For example, `{"S":"6"}` does not equal `{"N":"6"}`. Also,
+ `{"N":"6"}` does not compare to `{"NS":["6", "2", "1"]}`.
+ + `NOT_NULL` : The attribute exists. `NOT_NULL` is supported for all
+ datatypes, including lists and maps. This operator tests for the
+ existence of an attribute, not its data type. If the data type of
+ attribute " `a`" is null, and you evaluate it using `NOT_NULL`, the
+ result is a Boolean true . This result is because the attribute "
+ `a`" exists; its data type is not relevant to the `NOT_NULL`
+ comparison operator.
+ + `NULL` : The attribute does not exist. `NULL` is supported for all
+ datatypes, including lists and maps. This operator tests for the
+ nonexistence of an attribute, not its data type. If the data type
+ of attribute " `a`" is null, and you evaluate it using `NULL`, the
+ result is a Boolean false . This is because the attribute " `a`"
+ exists; its data type is not relevant to the `NULL` comparison
+ operator.
+ + `CONTAINS` : Checks for a subsequence, or value in a set.
+ AttributeValueList can contain only one AttributeValue element of
+ type String, Number, or Binary (not a set type). If the target
+ attribute of the comparison is of type String, then the operator
+ checks for a substring match. If the target attribute of the
+ comparison is of type Binary, then the operator looks for a
+ subsequence of the target that matches the input. If the target
+ attribute of the comparison is a set (" `SS`", " `NS`", or "
+ `BS`"), then the operator evaluates to true if it finds an exact
+ match with any member of the set. CONTAINS is supported for lists:
+ When evaluating " `a CONTAINS b`", " `a`" can be a list; however, "
+ `b`" cannot be a set, a map, or a list.
+ + `NOT_CONTAINS` : Checks for absence of a subsequence, or absence of a
+ value in a set. AttributeValueList can contain only one
+ AttributeValue element of type String, Number, or Binary (not a set
+ type). If the target attribute of the comparison is a String, then
+ the operator checks for the absence of a substring match. If the
+ target attribute of the comparison is Binary, then the operator
+ checks for the absence of a subsequence of the target that matches
+ the input. If the target attribute of the comparison is a set ("
+ `SS`", " `NS`", or " `BS`"), then the operator evaluates to true if
+ it does not find an exact match with any member of the set.
+ NOT_CONTAINS is supported for lists: When evaluating " `a NOT
+ CONTAINS b`", " `a`" can be a list; however, " `b`" cannot be a
+ set, a map, or a list.
+ + `BEGINS_WITH` : Checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set type). The target attribute of the comparison must be of type
+ String or Binary (not a Number or a set type).
+ + `IN` : Checks for matching elements within two sets.
+ AttributeValueList can contain one or more AttributeValue elements
+ of type String, Number, or Binary (not a set type). These
+ attributes are compared against an existing set type attribute of
+ an item. If any elements of the input set are present in the item
+ attribute, the expression evaluates to true.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set type). A target attribute matches if the target
+ value is greater than, or equal to, the first element and less
+ than, or equal to, the second element. If an item contains an
+ AttributeValue element of a different type than the one specified
+ in the request, the value does not match. For example, `{"S":"6"}`
+ does not compare to `{"N":"6"}`. Also, `{"N":"6"}` does not compare
+ to `{"NS":["6", "2", "1"]}`.
+ 
+ 
+ 
+ For usage examples of AttributeValueList and ComparisonOperator , see
+ `Legacy Conditional Parameters`_ in the Amazon DynamoDB Developer
+ Guide .
+ 
+ For backward compatibility with previous DynamoDB releases, the
+ following parameters can be used instead of AttributeValueList and
+ ComparisonOperator :
+ 
+ 
+ + Value - A value for DynamoDB to compare with an attribute.
+ + Exists - A Boolean value that causes DynamoDB to evaluate the value
+ before attempting the conditional operation:
+ 
+ + If Exists is `True`, DynamoDB will check to see if that attribute
+ value already exists in the table. If it is found, then the
+ condition evaluates to true; otherwise the condition evaluates to
+ false.
+ + If Exists is `False`, DynamoDB assumes that the attribute value does
+ not exist in the table. If in fact the value does not exist, then
+ the assumption is valid and the condition evaluates to true. If the
+ value is found, despite the assumption that it does not exist, the
+ condition evaluates to false.
+ Note that the default value for Exists is `True`.
+ 
+ 
+ The Value and Exists parameters are incompatible with
+ AttributeValueList and ComparisonOperator . Note that if you use
+ both sets of parameters at once, DynamoDB will return a
+ ValidationException exception.
+ 
+ :type conditional_operator: string
+ :param conditional_operator:
+ There is a newer parameter available. Use ConditionExpression instead.
+ Note that if you use ConditionalOperator and ConditionExpression at
+ the same time, DynamoDB will return a ValidationException
+ exception.
+ 
+ This parameter does not support lists or maps.
+ 
+ A logical operator to apply to the conditions in the Expected map:
+ 
+ 
+ + `AND` - If all of the conditions evaluate to true, then the entire
+ map evaluates to true.
+ + `OR` - If at least one of the conditions evaluates to true, then the
+ entire map evaluates to true.
+ 
+ 
+ If you omit ConditionalOperator , then `AND` is the default.
+ 
+ The operation will succeed only if the entire map evaluates to true.
+ 
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared either before or after they were updated. For UpdateItem ,
+ the valid values are:
+ 
+ 
+ + `NONE` - If ReturnValues is not specified, or if its value is `NONE`,
+ then nothing is returned. (This setting is the default for
+ ReturnValues .)
+ + `ALL_OLD` - If UpdateItem overwrote an attribute name-value pair,
+ then the content of the old item is returned.
+ + `UPDATED_OLD` - The old versions of only the updated attributes are
+ returned.
+ + `ALL_NEW` - All of the attributes of the new version of the item are
+ returned.
+ + `UPDATED_NEW` - The new versions of only the updated attributes are
+ returned.
+ 
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity: A value that if set to `TOTAL`, the
+ response includes ConsumedCapacity data for tables and indexes. If
+ set to `INDEXES`, the response includes ConsumedCapacity for
+ indexes. If set to `NONE` (the default), ConsumedCapacity is not
+ included in the response.
+ 
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: A value that if set to `SIZE`,
+ the response includes statistics about any item collections that
+ were modified during the operation. If set to `NONE` (the default),
+ no statistics are returned.
+ 
+ :type update_expression: string
+ :param update_expression: An expression that defines one or more
+ attributes to be updated, the action to be performed on them, and
+ new value(s) for them.
+ The following action values are available for UpdateExpression .
+ 
+ 
+ + `SET` - Adds one or more attributes and values to an item. If any of
+ these attributes already exist, they are replaced by the new values.
+ You can also use `SET` to add or subtract from an attribute that is
+ of type Number. `SET` supports the following functions:
+ 
+ + `if_not_exists (path, operand)` - if the item does not contain an
+ attribute at the specified path, then `if_not_exists` evaluates to
+ operand; otherwise, it evaluates to path. You can use this function
+ to avoid overwriting an attribute that may already be present in
+ the item.
+ + `list_append (operand, operand)` - evaluates to a list with a new
+ element added to it. You can append the new element to the start or
+ the end of the list by reversing the order of the operands.
+ These function names are case-sensitive.
+ + `REMOVE` - Removes one or more attributes from an item.
+ + `ADD` - Adds the specified value to the item, if the attribute does
+ not already exist. If the attribute does exist, then the behavior
+ of `ADD` depends on the data type of the attribute:
+ 
+ + If the existing attribute is a number, and if Value is also a number,
+ then Value is mathematically added to the existing attribute. If
+ Value is a negative number, then it is subtracted from the existing
+ attribute. If you use `ADD` to increment or decrement a number
+ value for an item that doesn't exist before the update, DynamoDB
+ uses `0` as the initial value. Similarly, if you use `ADD` for an
+ existing item to increment or decrement an attribute value that
+ doesn't exist before the update, DynamoDB uses `0` as the initial
+ value. For example, suppose that the item you want to update
+ doesn't have an attribute named itemcount , but you decide to `ADD`
+ the number `3` to this attribute anyway. DynamoDB will create the
+ itemcount attribute, set its initial value to `0`, and finally add
+ `3` to it. The result will be a new itemcount attribute in the
+ item, with a value of `3`.
+ + If the existing data type is a set and if Value is also a set, then
+ Value is added to the existing set. For example, if the attribute
+ value is the set `[1,2]`, and the `ADD` action specified `[3]`,
+ then the final attribute value is `[1,2,3]`. An error occurs if an
+ `ADD` action is specified for a set attribute and the attribute
+ type specified does not match the existing set type.
Both sets must + have the same primitive data type. For example, if the existing + data type is a set of strings, the Value must also be a set of + strings. + The `ADD` action only supports Number and set data types. In addition, + `ADD` can only be used on top-level attributes, not nested + attributes. + + `DELETE` - Deletes an element from a set. If a set of values is + specified, then those values are subtracted from the old set. For + example, if the attribute value was the set `[a,b,c]` and the + `DELETE` action specifies `[a,c]`, then the final attribute value + is `[b]`. Specifying an empty set is an error. The `DELETE` action + only supports Number and set data types. In addition, `DELETE` can + only be used on top-level attributes, not nested attributes. + + + You can have many actions in a single expression, such as the + following: `SET a=:value1, b=:value2 DELETE :value3, :value4, + :value5` + + For more information on update expressions, go to `Modifying Items and + Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type condition_expression: string + :param condition_expression: A condition that must be satisfied in + order for a conditional update to succeed. + An expression can contain any of the following: + + + + Boolean functions: `attribute_exists | attribute_not_exists | + contains | begins_with` These function names are case-sensitive. + + Comparison operators: ` = | <> | < | > | <= + | >= | BETWEEN | IN` + + Logical operators: `AND | OR | NOT` + + + For more information on condition expressions, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_names: map + :param expression_attribute_names: One or more substitution tokens for + simplifying complex expressions. The following are some use cases + for using ExpressionAttributeNames : + + + To shorten an attribute name that is very long or unwieldy in an + expression. + + To create a placeholder for repeating occurrences of an attribute + name in an expression. + + To prevent special characters in an attribute name from being + misinterpreted in an expression. + + + Use the **#** character in an expression to dereference an attribute + name. For example, consider the following expression: + + + + `order.customerInfo.LastName = "Smith" OR order.customerInfo.LastName + = "Jones"` + + + Now suppose that you specified the following for + ExpressionAttributeNames : + + + + `{"#name":"order.customerInfo.LastName"}` + + + The expression can now be simplified as follows: + + + + `#name = "Smith" OR #name = "Jones"` + + + For more information on expression attribute names, go to `Accessing + Item Attributes`_ in the Amazon DynamoDB Developer Guide . + + :type expression_attribute_values: map + :param expression_attribute_values: One or more values that can be + substituted in an expression. + Use the **:** (colon) character in an expression to dereference an + attribute value. For example, suppose that you wanted to check + whether the value of the ProductStatus attribute was one of the + following: + + `Available | Backordered | Discontinued` + + You would first need to specify ExpressionAttributeValues as follows: + + `{ ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + ":disc":{"S":"Discontinued"} }` + + You could then use these values in an expression, such as this: + + `ProductStatus IN (:avail, :back, :disc)` + + For more information on expression attribute values, go to `Specifying + Conditions`_ in the Amazon DynamoDB Developer Guide . 
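+ 
+ Example (a minimal sketch; it assumes a ``conn`` instance of this
+ connection class and a ``users`` table whose hash key is
+ ``username``)::
+ 
+ >>> conn.update_item(
+ ... 'users',
+ ... key={'username': {'S': 'johndoe'}},
+ ... update_expression='SET account_type = :t',
+ ... expression_attribute_values={':t': {'S': 'admin'}},
+ ... return_values='UPDATED_NEW')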
+ + """ + params = {'TableName': table_name, 'Key': key, } + if attribute_updates is not None: + params['AttributeUpdates'] = attribute_updates + if expected is not None: + params['Expected'] = expected + if conditional_operator is not None: + params['ConditionalOperator'] = conditional_operator + if return_values is not None: + params['ReturnValues'] = return_values + if return_consumed_capacity is not None: + params['ReturnConsumedCapacity'] = return_consumed_capacity + if return_item_collection_metrics is not None: + params['ReturnItemCollectionMetrics'] = return_item_collection_metrics + if update_expression is not None: + params['UpdateExpression'] = update_expression + if condition_expression is not None: + params['ConditionExpression'] = condition_expression + if expression_attribute_names is not None: + params['ExpressionAttributeNames'] = expression_attribute_names + if expression_attribute_values is not None: + params['ExpressionAttributeValues'] = expression_attribute_values + return self.make_request(action='UpdateItem', + body=json.dumps(params)) + + def update_table(self, table_name, provisioned_throughput=None, + global_secondary_index_updates=None, + attribute_definitions=None): + """ + Updates the provisioned throughput for the given table, or + manages the global secondary indexes on the table. + + You can increase or decrease the table's provisioned + throughput values within the maximums and minimums listed in + the `Limits`_ section in the Amazon DynamoDB Developer Guide . + + In addition, you can use UpdateTable to add, modify or delete + global secondary indexes on the table. For more information, + see `Managing Global Secondary Indexes`_ in the Amazon + DynamoDB Developer Guide . + + The table must be in the `ACTIVE` state for UpdateTable to + succeed. UpdateTable is an asynchronous operation; while + executing the operation, the table is in the `UPDATING` state. + While the table is in the `UPDATING` state, the table still + has the provisioned throughput from before the call. The + table's new provisioned throughput settings go into effect + when the table returns to the `ACTIVE` state; at that point, + the UpdateTable operation is complete. + + :type attribute_definitions: list + :param attribute_definitions: An array of attributes that describe the + key schema for the table and indexes. If you are adding a new + global secondary index to the table, AttributeDefinitions must + include the key element(s) of the new index. + + :type table_name: string + :param table_name: The name of the table to be updated. + + :type provisioned_throughput: dict + :param provisioned_throughput: Represents the provisioned throughput + settings for a specified table or index. The settings can be + modified using the UpdateTable operation. + For current minimum and maximum provisioned throughput values, see + `Limits`_ in the Amazon DynamoDB Developer Guide . + + :type global_secondary_index_updates: list + :param global_secondary_index_updates: + An array of one or more global secondary indexes for the table. For + each index in the array, you can specify one action: + + + + Create - add a new global secondary index to the table. + + Update - modify the provisioned throughput settings of an existing + global secondary index. + + Delete - remove a global secondary index from the table. 
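+ 
+ Example (a minimal sketch; the table name and throughput values are
+ assumptions)::
+ 
+ >>> conn.update_table(
+ ... 'users',
+ ... provisioned_throughput={
+ ... 'ReadCapacityUnits': 20,
+ ... 'WriteCapacityUnits': 10,
+ ... })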
+ + """ + params = {'TableName': table_name, } + if attribute_definitions is not None: + params['AttributeDefinitions'] = attribute_definitions + if provisioned_throughput is not None: + params['ProvisionedThroughput'] = provisioned_throughput + if global_secondary_index_updates is not None: + params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates + return self.make_request(action='UpdateTable', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.host, + 'Content-Type': 'application/x-amz-json-1.0', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body, host=self.host) + response = self._mexe(http_request, sender=None, + override_num_retries=self.NumberRetries, + retry_handler=self._retry_handler) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + + def _retry_handler(self, response, i, next_sleep): + status = None + boto.log.debug("Saw HTTP status: %s" % response.status) + if response.status == 400: + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + data = json.loads(response_body) + if 'ProvisionedThroughputExceededException' in data.get('__type'): + self.throughput_exceeded_events += 1 + msg = "%s, retry attempt %s" % ( + 'ProvisionedThroughputExceededException', + i + ) + next_sleep = self._truncated_exponential_time(i) + i += 1 + status = (msg, i, next_sleep) + if i == self.NumberRetries: + # If this was our last retry attempt, raise + # a specific error saying that the throughput + # was exceeded. + raise exceptions.ProvisionedThroughputExceededException( + response.status, response.reason, data) + elif 'ConditionalCheckFailedException' in data.get('__type'): + raise exceptions.ConditionalCheckFailedException( + response.status, response.reason, data) + elif 'ValidationException' in data.get('__type'): + raise exceptions.ValidationException( + response.status, response.reason, data) + else: + raise self.ResponseError(response.status, response.reason, + data) + expected_crc32 = response.getheader('x-amz-crc32') + if self._validate_checksums and expected_crc32 is not None: + boto.log.debug('Validating crc32 checksum for body: %s', + response.read()) + actual_crc32 = crc32(response.read()) & 0xffffffff + expected_crc32 = int(expected_crc32) + if actual_crc32 != expected_crc32: + msg = ("The calculated checksum %s did not match the expected " + "checksum %s" % (actual_crc32, expected_crc32)) + status = (msg, i + 1, self._truncated_exponential_time(i)) + return status + + def _truncated_exponential_time(self, i): + if i == 0: + next_sleep = 0 + else: + next_sleep = min(0.05 * (2 ** i), + boto.config.get('Boto', 'max_retry_delay', 60)) + return next_sleep diff --git a/ext/boto/dynamodb2/results.py b/ext/boto/dynamodb2/results.py new file mode 100644 index 0000000000..36f04d0a96 --- /dev/null +++ b/ext/boto/dynamodb2/results.py @@ -0,0 +1,204 @@ +class ResultSet(object): + """ + A class used to lazily handle page-to-page navigation through a set of + results. 
+ + It presents a transparent iterator interface, so that all the user has + to do is use it in a typical ``for`` loop (or list comprehension, etc.) + to fetch results, even if they weren't present in the current page of + results. + + This is used by the ``Table.query`` & ``Table.scan`` methods. + + Example:: + + >>> users = Table('users') + >>> results = ResultSet() + >>> results.to_call(users.query, username__gte='johndoe') + # Now iterate. When it runs out of results, it'll fetch the next page. + >>> for res in results: + ... print res['username'] + + """ + def __init__(self, max_page_size=None): + super(ResultSet, self).__init__() + self.the_callable = None + self.call_args = [] + self.call_kwargs = {} + self._results = [] + self._offset = -1 + self._results_left = True + self._last_key_seen = None + self._fetches = 0 + self._max_page_size = max_page_size + self._limit = None + + @property + def first_key(self): + return 'exclusive_start_key' + + def _reset(self): + """ + Resets the internal state of the ``ResultSet``. + + This prevents results from being cached long-term & consuming + excess memory. + + Largely internal. + """ + self._results = [] + self._offset = 0 + + def __iter__(self): + return self + + def __next__(self): + self._offset += 1 + + if self._offset >= len(self._results): + if self._results_left is False: + raise StopIteration() + + self.fetch_more() + + # It's possible that previous call to ``fetch_more`` may not return + # anything useful but there may be more results. Loop until we get + # something back, making sure we guard for no results left. + while not len(self._results) and self._results_left: + self.fetch_more() + + if self._offset < len(self._results): + if self._limit is not None: + self._limit -= 1 + + if self._limit < 0: + raise StopIteration() + + return self._results[self._offset] + else: + raise StopIteration() + + next = __next__ + + def to_call(self, the_callable, *args, **kwargs): + """ + Sets up the callable & any arguments to run it with. + + This is stored for subsequent calls so that those queries can be + run without requiring user intervention. + + Example:: + + # Just an example callable. + >>> def squares_to(y): + ... for x in range(1, y): + ... yield x**2 + >>> rs = ResultSet() + # Set up what to call & arguments. + >>> rs.to_call(squares_to, y=3) + + """ + if not callable(the_callable): + raise ValueError( + 'You must supply an object or function to be called.' + ) + + # We pop the ``limit``, if present, to track how many we should return + # to the user. This isn't the same as the ``limit`` that the low-level + # DDB api calls use (which limit page size, not the overall result set). + self._limit = kwargs.pop('limit', None) + + if self._limit is not None and self._limit < 0: + self._limit = None + + self.the_callable = the_callable + self.call_args = args + self.call_kwargs = kwargs + + def fetch_more(self): + """ + When the iterator runs out of results, this method is run to re-execute + the callable (& arguments) to fetch the next page. + + Largely internal. + """ + self._reset() + + args = self.call_args[:] + kwargs = self.call_kwargs.copy() + + if self._last_key_seen is not None: + kwargs[self.first_key] = self._last_key_seen + + # If the page size is greater than limit set them + # to the same value + if self._limit and self._max_page_size and self._max_page_size > self._limit: + self._max_page_size = self._limit + + # Put in the max page size. 
+ if self._max_page_size is not None: + kwargs['limit'] = self._max_page_size + elif self._limit is not None: + # If max_page_size is not set and limit is available + # use it as the page size + kwargs['limit'] = self._limit + + results = self.the_callable(*args, **kwargs) + self._fetches += 1 + new_results = results.get('results', []) + self._last_key_seen = results.get('last_key', None) + + if len(new_results): + self._results.extend(results['results']) + + # Check the limit, if it's present. + if self._limit is not None and self._limit >= 0: + limit = self._limit + limit -= len(results['results']) + # If we've exceeded the limit, we don't have any more + # results to look for. + if limit <= 0: + self._results_left = False + + if self._last_key_seen is None: + self._results_left = False + + +class BatchGetResultSet(ResultSet): + def __init__(self, *args, **kwargs): + self._keys_left = kwargs.pop('keys', []) + self._max_batch_get = kwargs.pop('max_batch_get', 100) + super(BatchGetResultSet, self).__init__(*args, **kwargs) + + def fetch_more(self): + self._reset() + + args = self.call_args[:] + kwargs = self.call_kwargs.copy() + + # Slice off the max we can fetch. + kwargs['keys'] = self._keys_left[:self._max_batch_get] + self._keys_left = self._keys_left[self._max_batch_get:] + + if len(self._keys_left) <= 0: + self._results_left = False + + results = self.the_callable(*args, **kwargs) + + if not len(results.get('results', [])): + return + + self._results.extend(results['results']) + + for offset, key_data in enumerate(results.get('unprocessed_keys', [])): + # We've got an unprocessed key. Reinsert it into the list. + # DynamoDB only returns valid keys, so there should be no risk of + # missing keys ever making it here. + self._keys_left.insert(offset, key_data) + + if len(self._keys_left) > 0: + self._results_left = True + + # Decrease the limit, if it's present. + if self.call_kwargs.get('limit'): + self.call_kwargs['limit'] -= len(results['results']) diff --git a/ext/boto/dynamodb2/table.py b/ext/boto/dynamodb2/table.py new file mode 100644 index 0000000000..8dfbea9c92 --- /dev/null +++ b/ext/boto/dynamodb2/table.py @@ -0,0 +1,1723 @@ +import boto +from boto.dynamodb2 import exceptions +from boto.dynamodb2.fields import (HashKey, RangeKey, + AllIndex, KeysOnlyIndex, IncludeIndex, + GlobalAllIndex, GlobalKeysOnlyIndex, + GlobalIncludeIndex) +from boto.dynamodb2.items import Item +from boto.dynamodb2.layer1 import DynamoDBConnection +from boto.dynamodb2.results import ResultSet, BatchGetResultSet +from boto.dynamodb2.types import (NonBooleanDynamizer, Dynamizer, FILTER_OPERATORS, + QUERY_OPERATORS, STRING) +from boto.exception import JSONResponseError + + +class Table(object): + """ + Interacts & models the behavior of a DynamoDB table. + + The ``Table`` object represents a set (or rough categorization) of + records within DynamoDB. The important part is that all records within the + table, while largely-schema-free, share the same schema & are essentially + namespaced for use in your application. For example, you might have a + ``users`` table or a ``forums`` table. 
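+ 
+ Example (a minimal sketch; the table name is an assumption)::
+ 
+ >>> users = Table('users')
+ >>> users.describe()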
+ """
+ max_batch_get = 100
+ 
+ _PROJECTION_TYPE_TO_INDEX = dict(
+ global_indexes=dict(
+ ALL=GlobalAllIndex,
+ KEYS_ONLY=GlobalKeysOnlyIndex,
+ INCLUDE=GlobalIncludeIndex,
+ ), local_indexes=dict(
+ ALL=AllIndex,
+ KEYS_ONLY=KeysOnlyIndex,
+ INCLUDE=IncludeIndex,
+ )
+ )
+ 
+ def __init__(self, table_name, schema=None, throughput=None, indexes=None,
+ global_indexes=None, connection=None):
+ """
+ Sets up a new in-memory ``Table``.
+ 
+ This is useful if the table already exists within DynamoDB & you simply
+ want to use it for additional interactions. The only required parameter
+ is the ``table_name``. However, under the hood, the object will call
+ ``describe_table`` to determine the schema/indexes/throughput. You
+ can avoid this extra call by passing in ``schema`` & ``indexes``.
+ 
+ **IMPORTANT** - If you're creating a new ``Table`` for the first time,
+ you should use the ``Table.create`` method instead, as it will
+ persist the table structure to DynamoDB.
+ 
+ Requires a ``table_name`` parameter, which should be a simple string
+ of the name of the table.
+ 
+ Optionally accepts a ``schema`` parameter, which should be a list of
+ ``BaseSchemaField`` subclasses representing the desired schema.
+ 
+ Optionally accepts a ``throughput`` parameter, which should be a
+ dictionary. If provided, it should specify a ``read`` & ``write`` key,
+ both of which should have an integer value associated with them.
+ 
+ Optionally accepts an ``indexes`` parameter, which should be a list of
+ ``BaseIndexField`` subclasses representing the desired indexes.
+ 
+ Optionally accepts a ``global_indexes`` parameter, which should be a
+ list of ``GlobalBaseIndexField`` subclasses representing the desired
+ indexes.
+ 
+ Optionally accepts a ``connection`` parameter, which should be a
+ ``DynamoDBConnection`` instance (or subclass). This is primarily useful
+ for specifying alternate connection parameters.
+ 
+ Example::
+ 
+ # The simple, it-already-exists case.
+ >>> conn = Table('users')
+ 
+ # The full, minimum-extra-calls case.
+ >>> from boto import dynamodb2
+ >>> users = Table('users', schema=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined', data_type=NUMBER)
+ ... ], throughput={
+ ... 'read':20,
+ ... 'write': 10,
+ ... }, indexes=[
+ ... KeysOnlyIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined')
+ ... ]),
+ ... ], global_indexes=[
+ ... GlobalAllIndex('UsersByZipcode', parts=[
+ ... HashKey('zipcode'),
+ ... RangeKey('username'),
+ ... ],
+ ... throughput={
+ ... 'read':10,
+ ... 'write':10,
+ ... }),
+ ... ], connection=dynamodb2.connect_to_region('us-west-2',
+ ... aws_access_key_id='key',
+ ... aws_secret_access_key='key',
+ ... ))
+ 
+ """
+ self.table_name = table_name
+ self.connection = connection
+ self.throughput = {
+ 'read': 5,
+ 'write': 5,
+ }
+ self.schema = schema
+ self.indexes = indexes
+ self.global_indexes = global_indexes
+ 
+ if self.connection is None:
+ self.connection = DynamoDBConnection()
+ 
+ if throughput is not None:
+ self.throughput = throughput
+ 
+ self._dynamizer = NonBooleanDynamizer()
+ 
+ def use_boolean(self):
+ self._dynamizer = Dynamizer()
+ 
+ @classmethod
+ def create(cls, table_name, schema, throughput=None, indexes=None,
+ global_indexes=None, connection=None):
+ """
+ Creates a new table in DynamoDB & returns an in-memory ``Table`` object.
+ 
+ This will set up a brand new table within DynamoDB. The ``table_name``
+ must be unique for your AWS account. The ``schema`` is also required
+ to define the key structure of the table.
+ 
+ **IMPORTANT** - You should consider the usage pattern of your table
+ up-front, as the schema can **NOT** be modified once the table is
+ created, requiring the creation of a new table & migrating the data
+ should you wish to revise it.
+ 
+ **IMPORTANT** - If the table already exists in DynamoDB, additional
+ calls to this method will result in an error. If you just need
+ a ``Table`` object to interact with the existing table, you should
+ just initialize a new ``Table`` object, which requires only the
+ ``table_name``.
+ 
+ Requires a ``table_name`` parameter, which should be a simple string
+ of the name of the table.
+ 
+ Requires a ``schema`` parameter, which should be a list of
+ ``BaseSchemaField`` subclasses representing the desired schema.
+ 
+ Optionally accepts a ``throughput`` parameter, which should be a
+ dictionary. If provided, it should specify a ``read`` & ``write`` key,
+ both of which should have an integer value associated with them.
+ 
+ Optionally accepts an ``indexes`` parameter, which should be a list of
+ ``BaseIndexField`` subclasses representing the desired indexes.
+ 
+ Optionally accepts a ``global_indexes`` parameter, which should be a
+ list of ``GlobalBaseIndexField`` subclasses representing the desired
+ indexes.
+ 
+ Optionally accepts a ``connection`` parameter, which should be a
+ ``DynamoDBConnection`` instance (or subclass). This is primarily useful
+ for specifying alternate connection parameters.
+ 
+ Example::
+ 
+ >>> users = Table.create('users', schema=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined', data_type=NUMBER)
+ ... ], throughput={
+ ... 'read':20,
+ ... 'write': 10,
+ ... }, indexes=[
+ ... KeysOnlyIndex('MostRecentlyJoined', parts=[
+ ... HashKey('username'),
+ ... RangeKey('date_joined'),
+ ... ]),
+ ... ], global_indexes=[
+ ... GlobalAllIndex('UsersByZipcode', parts=[
+ ... HashKey('zipcode'),
+ ... RangeKey('username'),
+ ... ],
+ ... throughput={
+ ... 'read':10,
+ ... 'write':10,
+ ... }),
+ ... ])
+ 
+ """
+ table = cls(table_name=table_name, connection=connection)
+ table.schema = schema
+ 
+ if throughput is not None:
+ table.throughput = throughput
+ 
+ if indexes is not None:
+ table.indexes = indexes
+ 
+ if global_indexes is not None:
+ table.global_indexes = global_indexes
+ 
+ # Prep the schema.
+ raw_schema = []
+ attr_defs = []
+ seen_attrs = set()
+ 
+ for field in table.schema:
+ raw_schema.append(field.schema())
+ # Build the attributes off what we know.
+ seen_attrs.add(field.name) + attr_defs.append(field.definition()) + + raw_throughput = { + 'ReadCapacityUnits': int(table.throughput['read']), + 'WriteCapacityUnits': int(table.throughput['write']), + } + kwargs = {} + + kwarg_map = { + 'indexes': 'local_secondary_indexes', + 'global_indexes': 'global_secondary_indexes', + } + for index_attr in ('indexes', 'global_indexes'): + table_indexes = getattr(table, index_attr) + if table_indexes: + raw_indexes = [] + for index_field in table_indexes: + raw_indexes.append(index_field.schema()) + # Make sure all attributes specified in the indexes are + # added to the definition + for field in index_field.parts: + if field.name not in seen_attrs: + seen_attrs.add(field.name) + attr_defs.append(field.definition()) + + kwargs[kwarg_map[index_attr]] = raw_indexes + + table.connection.create_table( + table_name=table.table_name, + attribute_definitions=attr_defs, + key_schema=raw_schema, + provisioned_throughput=raw_throughput, + **kwargs + ) + return table + + def _introspect_schema(self, raw_schema, raw_attributes=None): + """ + Given a raw schema structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. + """ + schema = [] + sane_attributes = {} + + if raw_attributes: + for field in raw_attributes: + sane_attributes[field['AttributeName']] = field['AttributeType'] + + for field in raw_schema: + data_type = sane_attributes.get(field['AttributeName'], STRING) + + if field['KeyType'] == 'HASH': + schema.append( + HashKey(field['AttributeName'], data_type=data_type) + ) + elif field['KeyType'] == 'RANGE': + schema.append( + RangeKey(field['AttributeName'], data_type=data_type) + ) + else: + raise exceptions.UnknownSchemaFieldError( + "%s was seen, but is unknown. Please report this at " + "https://github.com/boto/boto/issues." % field['KeyType'] + ) + + return schema + + def _introspect_all_indexes(self, raw_indexes, map_indexes_projection): + """ + Given a raw index/global index structure back from a DynamoDB response, + parse out & build the high-level Python objects that represent them. + """ + indexes = [] + + for field in raw_indexes: + index_klass = map_indexes_projection.get('ALL') + kwargs = { + 'parts': [] + } + + if field['Projection']['ProjectionType'] == 'ALL': + index_klass = map_indexes_projection.get('ALL') + elif field['Projection']['ProjectionType'] == 'KEYS_ONLY': + index_klass = map_indexes_projection.get('KEYS_ONLY') + elif field['Projection']['ProjectionType'] == 'INCLUDE': + index_klass = map_indexes_projection.get('INCLUDE') + kwargs['includes'] = field['Projection']['NonKeyAttributes'] + else: + raise exceptions.UnknownIndexFieldError( + "%s was seen, but is unknown. Please report this at " + "https://github.com/boto/boto/issues." % \ + field['Projection']['ProjectionType'] + ) + + name = field['IndexName'] + kwargs['parts'] = self._introspect_schema(field['KeySchema'], None) + indexes.append(index_klass(name, **kwargs)) + + return indexes + + def _introspect_indexes(self, raw_indexes): + """ + Given a raw index structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. + """ + return self._introspect_all_indexes( + raw_indexes, self._PROJECTION_TYPE_TO_INDEX.get('local_indexes')) + + def _introspect_global_indexes(self, raw_global_indexes): + """ + Given a raw global index structure back from a DynamoDB response, parse + out & build the high-level Python objects that represent them. 
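+ 
+ For illustration, a raw structure trimmed to the keys this method
+ inspects (index and attribute names are assumptions)::
+ 
+ raw_global_indexes = [{
+ 'IndexName': 'UsersByZipcode',
+ 'KeySchema': [
+ {'AttributeName': 'zipcode', 'KeyType': 'HASH'},
+ ],
+ 'Projection': {'ProjectionType': 'ALL'},
+ }]
+ 
+ A structure like this becomes a single ``GlobalAllIndex``.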
+ """ + return self._introspect_all_indexes( + raw_global_indexes, + self._PROJECTION_TYPE_TO_INDEX.get('global_indexes')) + + def describe(self): + """ + Describes the current structure of the table in DynamoDB. + + This information will be used to update the ``schema``, ``indexes``, + ``global_indexes`` and ``throughput`` information on the ``Table``. Some + calls, such as those involving creating keys or querying, will require + this information to be populated. + + It also returns the full raw data structure from DynamoDB, in the + event you'd like to parse out additional information (such as the + ``ItemCount`` or usage information). + + Example:: + + >>> users.describe() + { + # Lots of keys here... + } + >>> len(users.schema) + 2 + + """ + result = self.connection.describe_table(self.table_name) + + # Blindly update throughput, since what's on DynamoDB's end is likely + # more correct. + raw_throughput = result['Table']['ProvisionedThroughput'] + self.throughput['read'] = int(raw_throughput['ReadCapacityUnits']) + self.throughput['write'] = int(raw_throughput['WriteCapacityUnits']) + + if not self.schema: + # Since we have the data, build the schema. + raw_schema = result['Table'].get('KeySchema', []) + raw_attributes = result['Table'].get('AttributeDefinitions', []) + self.schema = self._introspect_schema(raw_schema, raw_attributes) + + if not self.indexes: + # Build the index information as well. + raw_indexes = result['Table'].get('LocalSecondaryIndexes', []) + self.indexes = self._introspect_indexes(raw_indexes) + + # Build the global index information as well. + raw_global_indexes = result['Table'].get('GlobalSecondaryIndexes', []) + self.global_indexes = self._introspect_global_indexes(raw_global_indexes) + + # This is leaky. + return result + + def update(self, throughput=None, global_indexes=None): + """ + Updates table attributes and global indexes in DynamoDB. + + Optionally accepts a ``throughput`` parameter, which should be a + dictionary. If provided, it should specify a ``read`` & ``write`` key, + both of which should have an integer value associated with them. + + Optionally accepts a ``global_indexes`` parameter, which should be a + dictionary. If provided, it should specify the index name, which is also + a dict containing a ``read`` & ``write`` key, both of which + should have an integer value associated with them. If you are writing + new code, please use ``Table.update_global_secondary_index``. + + Returns ``True`` on success. + + Example:: + + # For a read-heavier application... + >>> users.update(throughput={ + ... 'read': 20, + ... 'write': 10, + ... }) + True + + # To also update the global index(es) throughput. + >>> users.update(throughput={ + ... 'read': 20, + ... 'write': 10, + ... }, + ... global_secondary_indexes={ + ... 'TheIndexNameHere': { + ... 'read': 15, + ... 'write': 5, + ... } + ... 
}) + True + """ + + data = None + + if throughput: + self.throughput = throughput + data = { + 'ReadCapacityUnits': int(self.throughput['read']), + 'WriteCapacityUnits': int(self.throughput['write']), + } + + gsi_data = None + + if global_indexes: + gsi_data = [] + + for gsi_name, gsi_throughput in global_indexes.items(): + gsi_data.append({ + "Update": { + "IndexName": gsi_name, + "ProvisionedThroughput": { + "ReadCapacityUnits": int(gsi_throughput['read']), + "WriteCapacityUnits": int(gsi_throughput['write']), + }, + }, + }) + + if throughput or global_indexes: + self.connection.update_table( + self.table_name, + provisioned_throughput=data, + global_secondary_index_updates=gsi_data, + ) + + return True + else: + msg = 'You need to provide either the throughput or the ' \ + 'global_indexes to update method' + boto.log.error(msg) + + return False + + def create_global_secondary_index(self, global_index): + """ + Creates a global index in DynamoDB after the table has been created. + + Requires a ``global_index`` parameter, which should be a + ``GlobalBaseIndexField`` subclass representing the desired index. + + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``. + + Returns ``True`` on success. + + Example:: + + # To create a global index + >>> users.create_global_secondary_index( + ... global_index=GlobalAllIndex( + ... 'TheIndexNameHere', parts=[ + ... HashKey('requiredHashkey', data_type=STRING), + ... RangeKey('optionalRangeKey', data_type=STRING) + ... ], + ... throughput={ + ... 'read': 2, + ... 'write': 1, + ... }) + ... ) + True + + """ + + if global_index: + gsi_data = [] + gsi_data_attr_def = [] + + gsi_data.append({ + "Create": global_index.schema() + }) + + for attr_def in global_index.parts: + gsi_data_attr_def.append(attr_def.definition()) + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + attribute_definitions=gsi_data_attr_def + ) + + return True + else: + msg = 'You need to provide the global_index to ' \ + 'create_global_secondary_index method' + boto.log.error(msg) + + return False + + def delete_global_secondary_index(self, global_index_name): + """ + Deletes a global index in DynamoDB after the table has been created. + + Requires a ``global_index_name`` parameter, which should be a simple + string of the name of the global secondary index. + + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``. + + Returns ``True`` on success. + + Example:: + + # To delete a global index + >>> users.delete_global_secondary_index('TheIndexNameHere') + True + + """ + + if global_index_name: + gsi_data = [ + { + "Delete": { + "IndexName": global_index_name + } + } + ] + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + ) + + return True + else: + msg = 'You need to provide the global index name to ' \ + 'delete_global_secondary_index method' + boto.log.error(msg) + + return False + + def update_global_secondary_index(self, global_indexes): + """ + Updates one or more global indexes in DynamoDB after the table has been + created. + + Requires a ``global_indexes`` parameter, which should be a + dictionary mapping each index name to a dict with a ``read`` & + ``write`` key, both of which should have an integer value associated + with them. + + To update ``global_indexes`` information on the ``Table``, you'll need + to call ``Table.describe``.
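+ + (Editor's note) Per the implementation below, each entry is sent as an + ``{'Update': {'IndexName': ..., 'ProvisionedThroughput': ...}}`` + element of ``global_secondary_index_updates``.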
+ + Returns ``True`` on success. + + Example:: + + # To update a global index + >>> users.update_global_secondary_index(global_indexes={ + ... 'TheIndexNameHere': { + ... 'read': 15, + ... 'write': 5, + ... } + ... }) + True + + """ + + if global_indexes: + gsi_data = [] + + for gsi_name, gsi_throughput in global_indexes.items(): + gsi_data.append({ + "Update": { + "IndexName": gsi_name, + "ProvisionedThroughput": { + "ReadCapacityUnits": int(gsi_throughput['read']), + "WriteCapacityUnits": int(gsi_throughput['write']), + }, + }, + }) + + self.connection.update_table( + self.table_name, + global_secondary_index_updates=gsi_data, + ) + return True + else: + msg = 'You need to provide the global indexes to ' \ + 'update_global_secondary_index method' + boto.log.error(msg) + + return False + + def delete(self): + """ + Deletes a table in DynamoDB. + + **IMPORTANT** - Be careful when using this method, there is no undo. + + Returns ``True`` on success. + + Example:: + + >>> users.delete() + True + + """ + self.connection.delete_table(self.table_name) + return True + + def _encode_keys(self, keys): + """ + Given a flat Python dictionary of keys/values, converts it into the + nested dictionary DynamoDB expects. + + Converts:: + + { + 'username': 'john', + 'tags': [1, 2, 5], + } + + ...to...:: + + { + 'username': {'S': 'john'}, + 'tags': {'NS': ['1', '2', '5']}, + } + + """ + raw_key = {} + + for key, value in keys.items(): + raw_key[key] = self._dynamizer.encode(value) + + return raw_key + + def get_item(self, consistent=False, attributes=None, **kwargs): + """ + Fetches an item (record) from a table in DynamoDB. + + To specify the key of the item you'd like to get, you can specify the + key attributes as kwargs. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will perform + a consistent (but more expensive) read from DynamoDB. + (Default: ``False``) + + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldname to fetch. (Default: ``None``, which means all fields + should be fetched) + + Returns an ``Item`` instance containing all the data for that record. + + Raises an ``ItemNotFound`` exception if the item is not found. + + Example:: + + # A simple hash key. + >>> john = users.get_item(username='johndoe') + >>> john['first_name'] + 'John' + + # A complex hash+range key. + >>> john = users.get_item(username='johndoe', last_name='Doe') + >>> john['first_name'] + 'John' + + # A consistent read (assuming the data might have just changed). + >>> john = users.get_item(username='johndoe', consistent=True) + >>> john['first_name'] + 'Johann' + + # With a key that is an invalid variable name in Python. + # Also, assumes a different schema than previous examples. + >>> john = users.get_item(**{ + ... 'date-joined': 127549192, + ... }) + >>> john['first_name'] + 'John' + + """ + raw_key = self._encode_keys(kwargs) + item_data = self.connection.get_item( + self.table_name, + raw_key, + attributes_to_get=attributes, + consistent_read=consistent + ) + if 'Item' not in item_data: + raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs) + item = Item(self) + item.load(item_data) + return item + + def has_item(self, **kwargs): + """ + Return whether an item (record) exists within a table in DynamoDB. + + To specify the key of the item you'd like to get, you can specify the + key attributes as kwargs. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. 
If you provide ``True``, it will perform + a consistent (but more expensive) read from DynamoDB. + (Default: ``False``) + + Optionally accepts an ``attributes`` parameter, which should be a + list of fieldnames to fetch. (Default: ``None``, which means all fields + should be fetched) + + Returns ``True`` if an ``Item`` is present, ``False`` if not. + + Example:: + + # Simple, just hash-key schema. + >>> users.has_item(username='johndoe') + True + + # Complex schema, item not present. + >>> users.has_item( + ... username='johndoe', + ... date_joined='2014-01-07' + ... ) + False + + """ + try: + self.get_item(**kwargs) + except (JSONResponseError, exceptions.ItemNotFound): + return False + + return True + + def lookup(self, *args, **kwargs): + """ + Look up an entry in DynamoDB. This is mostly backwards compatible + with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first, + although you may still specify keyword arguments instead. + + Also unlike the get_item command, if the returned item has no keys + (i.e., it does not exist in DynamoDB), a None result is returned, instead + of an empty key object. + + Example:: + >>> user = users.lookup(username) + >>> user = users.lookup(username, consistent=True) + >>> app = apps.lookup('my_customer_id', 'my_app_id') + + """ + if not self.schema: + self.describe() + for x, arg in enumerate(args): + kwargs[self.schema[x].name] = arg + ret = self.get_item(**kwargs) + if not ret.keys(): + return None + return ret + + def new_item(self, *args): + """ + Returns a new, blank item + + This is mostly for consistency with boto.dynamodb + """ + if not self.schema: + self.describe() + data = {} + for x, arg in enumerate(args): + data[self.schema[x].name] = arg + return Item(self, data=data) + + def put_item(self, data, overwrite=False): + """ + Saves an entire item to DynamoDB. + + By default, if any part of the ``Item``'s original data doesn't match + what's currently in DynamoDB, this request will fail. This prevents + other processes from updating the data in between when you read the + item & when your request to update the item's data is processed, which + would typically result in some data loss. + + Requires a ``data`` parameter, which should be a dictionary of the data + you'd like to store in DynamoDB. + + Optionally accepts an ``overwrite`` parameter, which should be a + boolean. If you provide ``True``, this will tell DynamoDB to blindly + overwrite whatever data is present, if any. + + Returns ``True`` on success. + + Example:: + + >>> users.put_item(data={ + ... 'username': 'jane', + ... 'first_name': 'Jane', + ... 'last_name': 'Doe', + ... 'date_joined': 126478915, + ... }) + True + + """ + item = Item(self, data=data) + return item.save(overwrite=overwrite) + + def _put_item(self, item_data, expects=None): + """ + The internal variant of ``put_item`` (full data). This is used by the + ``Item`` objects, since that operation is represented at the + table-level by the API, but conceptually maps better to telling an + individual ``Item`` to save itself. + """ + kwargs = {} + + if expects is not None: + kwargs['expected'] = expects + + self.connection.put_item(self.table_name, item_data, **kwargs) + return True + + def _update_item(self, key, item_data, expects=None): + """ + The internal variant of ``put_item`` (partial data). This is used by the + ``Item`` objects, since that operation is represented at the + table-level by the API, but conceptually maps better to telling an + individual ``Item`` to save itself. 
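+ + (Editor's note) ``item_data`` is the partial attribute-update map + built by ``Item``, and ``expects`` is forwarded as the ``expected`` + conditional map, mirroring ``_put_item`` above.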
+ """ + raw_key = self._encode_keys(key) + kwargs = {} + + if expects is not None: + kwargs['expected'] = expects + + self.connection.update_item(self.table_name, raw_key, item_data, **kwargs) + return True + + def delete_item(self, expected=None, conditional_operator=None, **kwargs): + """ + Deletes a single item. You can perform a conditional delete operation + that deletes the item if it exists, or if it has an expected attribute + value. + + Conditional deletes are useful for only deleting items if specific + conditions are met. If those conditions are met, DynamoDB performs + the delete. Otherwise, the item is not deleted. + + To specify the expected attribute values of the item, you can pass a + dictionary of conditions to ``expected``. Each condition should follow + the pattern ``__=``. + + **IMPORTANT** - Be careful when using this method, there is no undo. + + To specify the key of the item you'd like to get, you can specify the + key attributes as kwargs. + + Optionally accepts an ``expected`` parameter which is a dictionary of + expected attribute value conditions. + + Optionally accepts a ``conditional_operator`` which applies to the + expected attribute value conditions: + + + `AND` - If all of the conditions evaluate to true (default) + + `OR` - True if at least one condition evaluates to true + + Returns ``True`` on success, ``False`` on failed conditional delete. + + Example:: + + # A simple hash key. + >>> users.delete_item(username='johndoe') + True + + # A complex hash+range key. + >>> users.delete_item(username='jane', last_name='Doe') + True + + # With a key that is an invalid variable name in Python. + # Also, assumes a different schema than previous examples. + >>> users.delete_item(**{ + ... 'date-joined': 127549192, + ... }) + True + + # Conditional delete + >>> users.delete_item(username='johndoe', + ... expected={'balance__eq': 0}) + True + """ + expected = self._build_filters(expected, using=FILTER_OPERATORS) + raw_key = self._encode_keys(kwargs) + + try: + self.connection.delete_item(self.table_name, raw_key, + expected=expected, + conditional_operator=conditional_operator) + except exceptions.ConditionalCheckFailedException: + return False + + return True + + def get_key_fields(self): + """ + Returns the fields necessary to make a key for a table. + + If the ``Table`` does not already have a populated ``schema``, + this will request it via a ``Table.describe`` call. + + Returns a list of fieldnames (strings). + + Example:: + + # A simple hash key. + >>> users.get_key_fields() + ['username'] + + # A complex hash+range key. + >>> users.get_key_fields() + ['username', 'last_name'] + + """ + if not self.schema: + # We don't know the structure of the table. Get a description to + # populate the schema. + self.describe() + + return [field.name for field in self.schema] + + def batch_write(self): + """ + Allows the batching of writes to DynamoDB. + + Since each write/delete call to DynamoDB has a cost associated with it, + when loading lots of data, it makes sense to batch them, creating as + few calls as possible. + + This returns a context manager that will transparently handle creating + these batches. The object you get back lightly-resembles a ``Table`` + object, sharing just the ``put_item`` & ``delete_item`` methods + (which are all that DynamoDB can batch in terms of writing data). + + DynamoDB's maximum batch size is 25 items per request. 
If you attempt + to put/delete more than that, the context manager will batch as many + as it can up to that number, then flush them to DynamoDB & continue + batching as more calls come in. + + Example:: + + # Assuming a table with one record... + >>> with users.batch_write() as batch: + ... batch.put_item(data={ + ... 'username': 'johndoe', + ... 'first_name': 'John', + ... 'last_name': 'Doe', + ... 'owner': 1, + ... }) + ... # Nothing across the wire yet. + ... batch.delete_item(username='bob') + ... # Still no requests sent. + ... batch.put_item(data={ + ... 'username': 'jane', + ... 'first_name': 'Jane', + ... 'last_name': 'Doe', + ... 'date_joined': 127436192, + ... }) + ... # Nothing yet, but once we leave the context, the + ... # put/deletes will be sent. + + """ + # PHENOMENAL COSMIC DOCS!!! itty-bitty code. + return BatchTable(self) + + def _build_filters(self, filter_kwargs, using=QUERY_OPERATORS): + """ + An internal method for taking query/scan-style ``**kwargs`` & turning + them into the raw structure DynamoDB expects for filtering. + """ + if filter_kwargs is None: + return + + filters = {} + + for field_and_op, value in filter_kwargs.items(): + field_bits = field_and_op.split('__') + fieldname = '__'.join(field_bits[:-1]) + + try: + op = using[field_bits[-1]] + except KeyError: + raise exceptions.UnknownFilterTypeError( + "Operator '%s' from '%s' is not recognized." % ( + field_bits[-1], + field_and_op + ) + ) + + lookup = { + 'AttributeValueList': [], + 'ComparisonOperator': op, + } + + # Special-case the ``NULL/NOT_NULL`` case. + if field_bits[-1] == 'null': + del lookup['AttributeValueList'] + + if value is False: + lookup['ComparisonOperator'] = 'NOT_NULL' + else: + lookup['ComparisonOperator'] = 'NULL' + # Special-case the ``BETWEEN`` case. + elif field_bits[-1] == 'between': + if len(value) == 2 and isinstance(value, (list, tuple)): + lookup['AttributeValueList'].append( + self._dynamizer.encode(value[0]) + ) + lookup['AttributeValueList'].append( + self._dynamizer.encode(value[1]) + ) + # Special-case the ``IN`` case + elif field_bits[-1] == 'in': + for val in value: + lookup['AttributeValueList'].append(self._dynamizer.encode(val)) + else: + # Fix up the value for encoding, because it was built to only work + # with ``set``s. + if isinstance(value, (list, tuple)): + value = set(value) + lookup['AttributeValueList'].append( + self._dynamizer.encode(value) + ) + + # Finally, insert it into the filters. + filters[fieldname] = lookup + + return filters + + def query(self, limit=None, index=None, reverse=False, consistent=False, + attributes=None, max_page_size=None, **filter_kwargs): + """ + **WARNING:** This method is provided **strictly** for + backward-compatibility. It returns results in an incorrect order. + + If you are writing new code, please use ``Table.query_2``. + """ + reverse = not reverse + return self.query_2(limit=limit, index=index, reverse=reverse, + consistent=consistent, attributes=attributes, + max_page_size=max_page_size, **filter_kwargs) + + def query_2(self, limit=None, index=None, reverse=False, + consistent=False, attributes=None, max_page_size=None, + query_filter=None, conditional_operator=None, + **filter_kwargs): + """ + Queries for a set of matching items in a DynamoDB table. + + Queries can be performed against a hash key, a hash+range key or + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. 
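+ + (Editor's sketch) Internally, a kwarg such as ``last_name__eq='Doe'`` + is split on ``__`` and mapped through ``QUERY_OPERATORS`` by + ``_build_filters`` into a ``KeyConditions`` entry of the form + ``{'ComparisonOperator': 'EQ', 'AttributeValueList': [...]}``.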
+ + **Note** - You cannot query against arbitrary fields within the data + stored in DynamoDB unless you specify ``query_filter`` values. + + To specify the filters of the items you'd like to get, you can specify + the filters as kwargs. Each filter kwarg should follow the pattern + ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters + are specified in the same way. + + Optionally accepts a ``limit`` parameter, which should be an integer + count of the total number of items to return. (Default: ``None`` - + all results) + + Optionally accepts an ``index`` parameter, which should be a string of + the name of the local secondary index you want to query against. + (Default: ``None``) + + Optionally accepts a ``reverse`` parameter, which will present the + results in reverse order. (Default: ``False`` - normal order) + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will force a consistent read of + the data (more expensive). (Default: ``False`` - use eventually + consistent reads) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes, only these will be fetched + from DynamoDB. This uses the ``AttributesToGet`` API parameter & sets + ``Select`` to ``SPECIFIC_ATTRIBUTES``. + + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & preventing + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. + + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + + Returns a ``ResultSet`` containing ``Item``s, which transparently handles the pagination of + results you get back. + + Example:: + + # Look for last names equal to "Doe". + >>> results = users.query(last_name__eq='Doe') + >>> for res in results: + ... print res['first_name'] + 'John' + 'Jane' + + # Look for last names beginning with "D", in reverse order, limit 3. + >>> results = users.query( + ... last_name__beginswith='D', + ... reverse=True, + ... limit=3 + ... ) + >>> for res in results: + ... print res['first_name'] + 'Alice' + 'Jane' + 'John' + + # Use an LSI & a consistent read. + >>> results = users.query( + ... date_joined__gte=1236451000, + ... owner__eq=1, + ... index='DateJoinedIndex', + ... consistent=True + ... ) + >>> for res in results: + ... print res['first_name'] + 'Alice' + 'Bob' + 'John' + 'Fred' + + # Filter by non-indexed field(s) + >>> results = users.query( + ... last_name__eq='Doe', + ... reverse=True, + ... query_filter={ + ... 'first_name__beginswith': 'A' + ... } + ... ) + >>> for res in results: + ... print res['first_name'] + ' ' + res['last_name'] + 'Alice Doe' + + """ + if self.schema: + if len(self.schema) == 1: + if len(filter_kwargs) <= 1: + if not self.global_indexes or not len(self.global_indexes): + # If the schema only has one field, there's <= 1 filter + # param & no Global Secondary Indexes, this is user + # error. Bail early. + raise exceptions.QueryError( + "You must specify more than one key to filter on."
+ ) + + if attributes is not None: + select = 'SPECIFIC_ATTRIBUTES' + else: + select = None + + results = ResultSet( + max_page_size=max_page_size + ) + kwargs = filter_kwargs.copy() + kwargs.update({ + 'limit': limit, + 'index': index, + 'reverse': reverse, + 'consistent': consistent, + 'select': select, + 'attributes_to_get': attributes, + 'query_filter': query_filter, + 'conditional_operator': conditional_operator, + }) + results.to_call(self._query, **kwargs) + return results + + def query_count(self, index=None, consistent=False, conditional_operator=None, + query_filter=None, scan_index_forward=True, limit=None, + exclusive_start_key=None, **filter_kwargs): + """ + Queries the exact count of matching items in a DynamoDB table. + + Queries can be performed against a hash key, a hash+range key or + against any data stored in your local secondary indexes. Query filters + can be used to filter on arbitrary fields. + + To specify the filters of the items you'd like to get, you can specify + the filters as kwargs. Each filter kwarg should follow the pattern + ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters + are specified in the same way. + + Optionally accepts an ``index`` parameter, which should be a string of + the name of the local secondary index you want to query against. + (Default: ``None``) + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, it will force a consistent read of + the data (more expensive). (Default: ``False`` - use eventually + consistent reads) + + Optionally accepts a ``query_filter`` which is a dictionary of filter + conditions against any arbitrary field in the returned data. + + Optionally accepts a ``conditional_operator`` which applies to the + query filter conditions: + + + `AND` - True if all filter conditions evaluate to true (default) + + `OR` - True if at least one filter condition evaluates to true + + Optionally accepts an ``exclusive_start_key`` which is used to get + the remaining items when a query cannot return the complete count. + + Returns an integer which represents the exact number of matching + items. + + :type scan_index_forward: boolean + :param scan_index_forward: Specifies ascending (true) or descending + (false) traversal of the index. DynamoDB returns results reflecting + the requested order determined by the range key. If the data type + is Number, the results are returned in numeric order. For String, + the results are returned in order of ASCII character code values. + For Binary, DynamoDB treats each byte of the binary data as + unsigned when it compares binary values. + + If ScanIndexForward is not specified, the results are returned in + ascending order. + + :type limit: integer + :param limit: The maximum number of items to evaluate (not necessarily + the number of matching items). + + Example:: + + # Look for last names equal to "Doe". + >>> users.query_count(last_name__eq='Doe') + 5 + + # Use an LSI & a consistent read. + >>> users.query_count( + ... date_joined__gte=1236451000, + ... owner__eq=1, + ... index='DateJoinedIndex', + ... consistent=True + ...
) + 2 + + """ + key_conditions = self._build_filters( + filter_kwargs, + using=QUERY_OPERATORS + ) + + built_query_filter = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + + count_buffer = 0 + last_evaluated_key = exclusive_start_key + + while True: + raw_results = self.connection.query( + self.table_name, + index_name=index, + consistent_read=consistent, + select='COUNT', + key_conditions=key_conditions, + query_filter=built_query_filter, + conditional_operator=conditional_operator, + limit=limit, + scan_index_forward=scan_index_forward, + exclusive_start_key=last_evaluated_key + ) + + count_buffer += int(raw_results.get('Count', 0)) + last_evaluated_key = raw_results.get('LastEvaluatedKey') + if not last_evaluated_key or count_buffer < 1: + break + + return count_buffer + + def _query(self, limit=None, index=None, reverse=False, consistent=False, + exclusive_start_key=None, select=None, attributes_to_get=None, + query_filter=None, conditional_operator=None, **filter_kwargs): + """ + The internal method that performs the actual queries. Used extensively + by ``ResultSet`` to perform each (paginated) request. + """ + kwargs = { + 'limit': limit, + 'index_name': index, + 'consistent_read': consistent, + 'select': select, + 'attributes_to_get': attributes_to_get, + 'conditional_operator': conditional_operator, + } + + if reverse: + kwargs['scan_index_forward'] = False + + if exclusive_start_key: + kwargs['exclusive_start_key'] = {} + + for key, value in exclusive_start_key.items(): + kwargs['exclusive_start_key'][key] = \ + self._dynamizer.encode(value) + + # Convert the filters into something we can actually use. + kwargs['key_conditions'] = self._build_filters( + filter_kwargs, + using=QUERY_OPERATORS + ) + + kwargs['query_filter'] = self._build_filters( + query_filter, + using=FILTER_OPERATORS + ) + + raw_results = self.connection.query( + self.table_name, + **kwargs + ) + results = [] + last_key = None + + for raw_item in raw_results.get('Items', []): + item = Item(self) + item.load({ + 'Item': raw_item, + }) + results.append(item) + + if raw_results.get('LastEvaluatedKey', None): + last_key = {} + + for key, value in raw_results['LastEvaluatedKey'].items(): + last_key[key] = self._dynamizer.decode(value) + + return { + 'results': results, + 'last_key': last_key, + } + + def scan(self, limit=None, segment=None, total_segments=None, + max_page_size=None, attributes=None, conditional_operator=None, + **filter_kwargs): + """ + Scans across all items within a DynamoDB table. + + Scans can be performed against a hash key or a hash+range key. You can + additionally filter the results after the table has been read but + before the response is returned by using query filters. + + To specify the filters of the items you'd like to get, you can specify + the filters as kwargs. Each filter kwarg should follow the pattern + ``<fieldname>__<filter_operation>=<value_to_look_for>``. + + Optionally accepts a ``limit`` parameter, which should be an integer + count of the total number of items to return. (Default: ``None`` - + all results) + + Optionally accepts a ``segment`` parameter, which should be an integer + of the segment to retrieve on. Please see the documentation about + Parallel Scans. (Default: ``None`` - no segments) + + Optionally accepts a ``total_segments`` parameter, which should be an + integer count of the number of segments to divide the table into.
+ Please see the documentation about Parallel Scans. (Default: ``None`` - + no segments) + + Optionally accepts a ``max_page_size`` parameter, which should be an + integer count of the maximum number of items to retrieve + **per-request**. This is useful in making faster requests & preventing + the scan from drowning out other queries. (Default: ``None`` - + fetch as many as DynamoDB will return) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes, only these will be fetched + from DynamoDB. This uses the ``AttributesToGet`` API parameter & sets + ``Select`` to ``SPECIFIC_ATTRIBUTES``. + + Returns a ``ResultSet``, which transparently handles the pagination of + results you get back. + + Example:: + + # All results. + >>> everything = users.scan() + + # Look for last names beginning with "D". + >>> results = users.scan(last_name__beginswith='D') + >>> for res in results: + ... print res['first_name'] + 'Alice' + 'John' + 'Jane' + + # Use an ``IN`` filter & limit. + >>> results = users.scan( + ... age__in=[25, 26, 27, 28, 29], + ... limit=1 + ... ) + >>> for res in results: + ... print res['first_name'] + 'Alice' + + """ + results = ResultSet( + max_page_size=max_page_size + ) + kwargs = filter_kwargs.copy() + kwargs.update({ + 'limit': limit, + 'segment': segment, + 'total_segments': total_segments, + 'attributes': attributes, + 'conditional_operator': conditional_operator, + }) + results.to_call(self._scan, **kwargs) + return results + + def _scan(self, limit=None, exclusive_start_key=None, segment=None, + total_segments=None, attributes=None, conditional_operator=None, + **filter_kwargs): + """ + The internal method that performs the actual scan. Used extensively + by ``ResultSet`` to perform each (paginated) request. + """ + kwargs = { + 'limit': limit, + 'segment': segment, + 'total_segments': total_segments, + 'attributes_to_get': attributes, + 'conditional_operator': conditional_operator, + } + + if exclusive_start_key: + kwargs['exclusive_start_key'] = {} + + for key, value in exclusive_start_key.items(): + kwargs['exclusive_start_key'][key] = \ + self._dynamizer.encode(value) + + # Convert the filters into something we can actually use. + kwargs['scan_filter'] = self._build_filters( + filter_kwargs, + using=FILTER_OPERATORS + ) + + raw_results = self.connection.scan( + self.table_name, + **kwargs + ) + results = [] + last_key = None + + for raw_item in raw_results.get('Items', []): + item = Item(self) + item.load({ + 'Item': raw_item, + }) + results.append(item) + + if raw_results.get('LastEvaluatedKey', None): + last_key = {} + + for key, value in raw_results['LastEvaluatedKey'].items(): + last_key[key] = self._dynamizer.decode(value) + + return { + 'results': results, + 'last_key': last_key, + } + + def batch_get(self, keys, consistent=False, attributes=None): + """ + Fetches many specific items in batch from a table. + + Requires a ``keys`` parameter, which should be a list of dictionaries. + Each dictionary should consist of the key values to specify. + + Optionally accepts a ``consistent`` parameter, which should be a + boolean. If you provide ``True``, a strongly consistent read will be + used. (Default: False) + + Optionally accepts an ``attributes`` parameter, which should be a + tuple. If you provide any attributes, only these will be fetched + from DynamoDB. + + Returns a ``ResultSet``, which transparently handles the pagination of + results you get back. + + Example:: + + >>> results = users.batch_get(keys=[ + ... { + ...
'username': 'johndoe', + ... }, + ... { + ... 'username': 'jane', + ... }, + ... { + ... 'username': 'fred', + ... }, + ... ]) + >>> for res in results: + ... print res['first_name'] + 'John' + 'Jane' + 'Fred' + + """ + # We pass the keys to the constructor instead, so it can maintain it's + # own internal state as to what keys have been processed. + results = BatchGetResultSet(keys=keys, max_batch_get=self.max_batch_get) + results.to_call(self._batch_get, consistent=consistent, attributes=attributes) + return results + + def _batch_get(self, keys, consistent=False, attributes=None): + """ + The internal method that performs the actual batch get. Used extensively + by ``BatchGetResultSet`` to perform each (paginated) request. + """ + items = { + self.table_name: { + 'Keys': [], + }, + } + + if consistent: + items[self.table_name]['ConsistentRead'] = True + + if attributes is not None: + items[self.table_name]['AttributesToGet'] = attributes + + for key_data in keys: + raw_key = {} + + for key, value in key_data.items(): + raw_key[key] = self._dynamizer.encode(value) + + items[self.table_name]['Keys'].append(raw_key) + + raw_results = self.connection.batch_get_item(request_items=items) + results = [] + unprocessed_keys = [] + + for raw_item in raw_results['Responses'].get(self.table_name, []): + item = Item(self) + item.load({ + 'Item': raw_item, + }) + results.append(item) + + raw_unprocessed = raw_results.get('UnprocessedKeys', {}).get(self.table_name, {}) + + for raw_key in raw_unprocessed.get('Keys', []): + py_key = {} + + for key, value in raw_key.items(): + py_key[key] = self._dynamizer.decode(value) + + unprocessed_keys.append(py_key) + + return { + 'results': results, + # NEVER return a ``last_key``. Just in-case any part of + # ``ResultSet`` peeks through, since much of the + # original underlying implementation is based on this key. + 'last_key': None, + 'unprocessed_keys': unprocessed_keys, + } + + def count(self): + """ + Returns a (very) eventually consistent count of the number of items + in a table. + + Lag time is about 6 hours, so don't expect a high degree of accuracy. + + Example:: + + >>> users.count() + 6 + + """ + info = self.describe() + return info['Table'].get('ItemCount', 0) + + +class BatchTable(object): + """ + Used by ``Table`` as the context manager for batch writes. + + You likely don't want to try to use this object directly. + """ + def __init__(self, table): + self.table = table + self._to_put = [] + self._to_delete = [] + self._unprocessed = [] + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + if self._to_put or self._to_delete: + # Flush anything that's left. + self.flush() + + if self._unprocessed: + # Finally, handle anything that wasn't processed. + self.resend_unprocessed() + + def put_item(self, data, overwrite=False): + self._to_put.append(data) + + if self.should_flush(): + self.flush() + + def delete_item(self, **kwargs): + self._to_delete.append(kwargs) + + if self.should_flush(): + self.flush() + + def should_flush(self): + if len(self._to_put) + len(self._to_delete) == 25: + return True + + return False + + def flush(self): + batch_data = { + self.table.table_name: [ + # We'll insert data here shortly. 
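+ # (Editor's note) Each entry appended below is either a + # {'PutRequest': {'Item': ...}} or a {'DeleteRequest': {'Key': ...}} + # mapping, matching the BatchWriteItem request format.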
+ ], + } + + for put in self._to_put: + item = Item(self.table, data=put) + batch_data[self.table.table_name].append({ + 'PutRequest': { + 'Item': item.prepare_full(), + } + }) + + for delete in self._to_delete: + batch_data[self.table.table_name].append({ + 'DeleteRequest': { + 'Key': self.table._encode_keys(delete), + } + }) + + resp = self.table.connection.batch_write_item(batch_data) + self.handle_unprocessed(resp) + + self._to_put = [] + self._to_delete = [] + return True + + def handle_unprocessed(self, resp): + if len(resp.get('UnprocessedItems', [])): + table_name = self.table.table_name + unprocessed = resp['UnprocessedItems'].get(table_name, []) + + # Some items have not been processed. Stow them for now & + # re-attempt processing on ``__exit__``. + msg = "%s items were unprocessed. Storing for later." + boto.log.info(msg % len(unprocessed)) + self._unprocessed.extend(unprocessed) + + def resend_unprocessed(self): + # If there are unprocessed records (for instance, the user was over + # their throughput limitations), iterate over them & send until they're + # all there. + boto.log.info( + "Re-sending %s unprocessed items." % len(self._unprocessed) + ) + + while len(self._unprocessed): + # Again, do 25 at a time. + to_resend = self._unprocessed[:25] + # Remove them from the list. + self._unprocessed = self._unprocessed[25:] + batch_data = { + self.table.table_name: to_resend + } + boto.log.info("Sending %s items" % len(to_resend)) + resp = self.table.connection.batch_write_item(batch_data) + self.handle_unprocessed(resp) + boto.log.info( + "%s unprocessed items left" % len(self._unprocessed) + ) diff --git a/ext/boto/dynamodb2/types.py b/ext/boto/dynamodb2/types.py new file mode 100644 index 0000000000..1216621ac5 --- /dev/null +++ b/ext/boto/dynamodb2/types.py @@ -0,0 +1,44 @@ +# Shadow the DynamoDB v1 bits. +# This way, no end user should have to cross-import between versions & we +# reserve the namespace to extend v2 if it's ever needed. +from boto.dynamodb.types import NonBooleanDynamizer, Dynamizer + + +# Some constants for our use. +STRING = 'S' +NUMBER = 'N' +BINARY = 'B' +STRING_SET = 'SS' +NUMBER_SET = 'NS' +BINARY_SET = 'BS' +NULL = 'NULL' +BOOLEAN = 'BOOL' +MAP = 'M' +LIST = 'L' + +QUERY_OPERATORS = { + 'eq': 'EQ', + 'lte': 'LE', + 'lt': 'LT', + 'gte': 'GE', + 'gt': 'GT', + 'beginswith': 'BEGINS_WITH', + 'between': 'BETWEEN', +} + +FILTER_OPERATORS = { + 'eq': 'EQ', + 'ne': 'NE', + 'lte': 'LE', + 'lt': 'LT', + 'gte': 'GE', + 'gt': 'GT', + # FIXME: Is this necessary? i.e. 
``whatever__null=False`` + 'nnull': 'NOT_NULL', + 'null': 'NULL', + 'contains': 'CONTAINS', + 'ncontains': 'NOT_CONTAINS', + 'beginswith': 'BEGINS_WITH', + 'in': 'IN', + 'between': 'BETWEEN', +} diff --git a/ext/boto/ec2/__init__.py b/ext/boto/ec2/__init__.py new file mode 100644 index 0000000000..cdf0916daf --- /dev/null +++ b/ext/boto/ec2/__init__.py @@ -0,0 +1,84 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +service from AWS. +""" +from boto.ec2.connection import EC2Connection +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.regioninfo import connect + + +RegionData = load_regions().get('ec2', {}) + + +def regions(**kw_params): + """ + Get all available regions for the EC2 service. + You may pass any of the arguments accepted by the EC2Connection + object's constructor as keyword arguments and they will be + passed along to the EC2Connection object. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return get_regions('ec2', connection_cls=EC2Connection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.connection.EC2Connection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.connection.EC2Connection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + if 'region' in kw_params and isinstance(kw_params['region'], RegionInfo)\ + and region_name == kw_params['region'].name: + return EC2Connection(**kw_params) + + return connect('ec2', region_name, + connection_cls=EC2Connection, **kw_params) + + +def get_region(region_name, **kw_params): + """ + Find and return a :class:`boto.ec2.regioninfo.RegionInfo` object + given a region name. + + :type: str + :param: The name of the region. + + :rtype: :class:`boto.ec2.regioninfo.RegionInfo` + :return: The RegionInfo object for the given region or None if + an invalid region name is provided. 
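+ + Example (editor's sketch; assumes AWS credentials are configured):: + + >>> from boto.ec2 import get_region + >>> region = get_region('us-east-1') + >>> conn = region.connect() if region else None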
+ """ + for region in regions(**kw_params): + if region.name == region_name: + return region + return None diff --git a/ext/boto/ec2/address.py b/ext/boto/ec2/address.py new file mode 100644 index 0000000000..807406f78b --- /dev/null +++ b/ext/boto/ec2/address.py @@ -0,0 +1,130 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from boto.ec2.ec2object import EC2Object + + +class Address(EC2Object): + """ + Represents an EC2 Elastic IP Address + + :ivar public_ip: The Elastic IP address. + :ivar instance_id: The instance the address is associated with (if any). + :ivar domain: Indicates whether the address is a EC2 address or a VPC address (standard|vpc). + :ivar allocation_id: The allocation ID for the address (VPC addresses only). + :ivar association_id: The association ID for the address (VPC addresses only). + :ivar network_interface_id: The network interface (if any) that the address is associated with (VPC addresses only). + :ivar network_interface_owner_id: The owner IID (VPC addresses only). + :ivar private_ip_address: The private IP address associated with the Elastic IP address (VPC addresses only). + """ + + def __init__(self, connection=None, public_ip=None, instance_id=None): + super(Address, self).__init__(connection) + self.connection = connection + self.public_ip = public_ip + self.instance_id = instance_id + self.domain = None + self.allocation_id = None + self.association_id = None + self.network_interface_id = None + self.network_interface_owner_id = None + self.private_ip_address = None + + def __repr__(self): + return 'Address:%s' % self.public_ip + + def endElement(self, name, value, connection): + if name == 'publicIp': + self.public_ip = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'domain': + self.domain = value + elif name == 'allocationId': + self.allocation_id = value + elif name == 'associationId': + self.association_id = value + elif name == 'networkInterfaceId': + self.network_interface_id = value + elif name == 'networkInterfaceOwnerId': + self.network_interface_owner_id = value + elif name == 'privateIpAddress': + self.private_ip_address = value + else: + setattr(self, name, value) + + def release(self, dry_run=False): + """ + Free up this Elastic IP address. 
+ :see: :meth:`boto.ec2.connection.EC2Connection.release_address` + """ + if self.allocation_id: + return self.connection.release_address( + allocation_id=self.allocation_id, + dry_run=dry_run) + else: + return self.connection.release_address( + public_ip=self.public_ip, + dry_run=dry_run + ) + + delete = release + + def associate(self, instance_id=None, network_interface_id=None, private_ip_address=None, allow_reassociation=False, dry_run=False): + """ + Associate this Elastic IP address with a currently running instance. + :see: :meth:`boto.ec2.connection.EC2Connection.associate_address` + """ + if self.allocation_id: + return self.connection.associate_address( + instance_id=instance_id, + public_ip=self.public_ip, + allocation_id=self.allocation_id, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) + return self.connection.associate_address( + instance_id=instance_id, + public_ip=self.public_ip, + network_interface_id=network_interface_id, + private_ip_address=private_ip_address, + allow_reassociation=allow_reassociation, + dry_run=dry_run + ) + + def disassociate(self, dry_run=False): + """ + Disassociate this Elastic IP address from a currently running instance. + :see: :meth:`boto.ec2.connection.EC2Connection.disassociate_address` + """ + if self.association_id: + return self.connection.disassociate_address( + association_id=self.association_id, + dry_run=dry_run + ) + else: + return self.connection.disassociate_address( + public_ip=self.public_ip, + dry_run=dry_run + ) diff --git a/ext/boto/ec2/attributes.py b/ext/boto/ec2/attributes.py new file mode 100644 index 0000000000..d76e5c5428 --- /dev/null +++ b/ext/boto/ec2/attributes.py @@ -0,0 +1,71 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
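+ + # (Editor's note) The classes below follow boto's SAX-style response + # parsing protocol: startElement() may return a handler for a nested + # element, and endElement() stores each completed value.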
+ + +class AccountAttribute(object): + def __init__(self, connection=None): + self.connection = connection + self.attribute_name = None + self.attribute_values = None + + def startElement(self, name, attrs, connection): + if name == 'attributeValueSet': + self.attribute_values = AttributeValues() + return self.attribute_values + + def endElement(self, name, value, connection): + if name == 'attributeName': + self.attribute_name = value + + +class AttributeValues(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'attributeValue': + self.append(value) + + +class VPCAttribute(object): + def __init__(self, connection=None): + self.connection = connection + self.vpc_id = None + self.enable_dns_hostnames = None + self.enable_dns_support = None + self._current_attr = None + + def startElement(self, name, attrs, connection): + if name in ('enableDnsHostnames', 'enableDnsSupport'): + self._current_attr = name + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'value': + if value == 'true': + value = True + else: + value = False + if self._current_attr == 'enableDnsHostnames': + self.enable_dns_hostnames = value + elif self._current_attr == 'enableDnsSupport': + self.enable_dns_support = value diff --git a/ext/boto/ec2/autoscale/__init__.py b/ext/boto/ec2/autoscale/__init__.py new file mode 100644 index 0000000000..1bf2718317 --- /dev/null +++ b/ext/boto/ec2/autoscale/__init__.py @@ -0,0 +1,894 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# Copyright (c) 2011 Jann Kleen +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +Auto Scaling service. 
+""" + +import base64 + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.regioninfo import connect +from boto.ec2.autoscale.request import Request +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.group import AutoScalingGroup +from boto.ec2.autoscale.group import ProcessType +from boto.ec2.autoscale.activity import Activity +from boto.ec2.autoscale.policy import AdjustmentType +from boto.ec2.autoscale.policy import MetricCollectionTypes +from boto.ec2.autoscale.policy import ScalingPolicy +from boto.ec2.autoscale.policy import TerminationPolicies +from boto.ec2.autoscale.instance import Instance +from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction +from boto.ec2.autoscale.tag import Tag +from boto.ec2.autoscale.limits import AccountLimits +from boto.compat import six + +RegionData = load_regions().get('autoscaling', {}) + + +def regions(): + """ + Get all available regions for the Auto Scaling service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('autoscaling', connection_cls=AutoScaleConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.autoscale.AutoScaleConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.AutoScaleConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('autoscaling', region_name, + connection_cls=AutoScaleConnection, **kw_params) + + +class AutoScaleConnection(AWSQueryConnection): + APIVersion = boto.config.get('Boto', 'autoscale_version', '2011-01-01') + DefaultRegionEndpoint = boto.config.get('Boto', 'autoscale_endpoint', + 'autoscaling.us-east-1.amazonaws.com') + DefaultRegionName = boto.config.get('Boto', 'autoscale_region_name', + 'us-east-1') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None, + use_block_device_types=False): + """ + Init method to create a new connection to the AutoScaling service. + + B{Note:} The host argument is overridden by the host specified in the + boto configuration file. + + + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + AutoScaleConnection) + self.region = region + self.use_block_device_types = use_block_device_types + super(AutoScaleConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path=path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_list_params(self, params, items, label): + """ + Items is a list of dictionaries or strings:: + + [ + { + 'Protocol' : 'HTTP', + 'LoadBalancerPort' : '80', + 'InstancePort' : '80' + }, + .. + ] etc. + + or:: + + ['us-east-1b',...] 
+ """ + # different from EC2 list params + for i in range(1, len(items) + 1): + if isinstance(items[i - 1], dict): + for k, v in six.iteritems(items[i - 1]): + if isinstance(v, dict): + for kk, vv in six.iteritems(v): + params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv + else: + params['%s.member.%d.%s' % (label, i, k)] = v + elif isinstance(items[i - 1], six.string_types): + params['%s.member.%d' % (label, i)] = items[i - 1] + + def _update_group(self, op, as_group): + params = {'AutoScalingGroupName': as_group.name, + 'LaunchConfigurationName': as_group.launch_config_name, + 'MinSize': as_group.min_size, + 'MaxSize': as_group.max_size} + # get availability zone information (required param) + zones = as_group.availability_zones + self.build_list_params(params, zones, 'AvailabilityZones') + if as_group.desired_capacity is not None: + params['DesiredCapacity'] = as_group.desired_capacity + if as_group.vpc_zone_identifier: + params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier + if as_group.health_check_period: + params['HealthCheckGracePeriod'] = as_group.health_check_period + if as_group.health_check_type: + params['HealthCheckType'] = as_group.health_check_type + if as_group.default_cooldown: + params['DefaultCooldown'] = as_group.default_cooldown + if as_group.placement_group: + params['PlacementGroup'] = as_group.placement_group + if as_group.instance_id: + params['InstanceId'] = as_group.instance_id + if as_group.termination_policies: + self.build_list_params(params, as_group.termination_policies, + 'TerminationPolicies') + if op.startswith('Create'): + # you can only associate load balancers with an autoscale + # group at creation time + if as_group.load_balancers: + self.build_list_params(params, as_group.load_balancers, + 'LoadBalancerNames') + if as_group.tags: + for i, tag in enumerate(as_group.tags): + tag.build_params(params, i + 1) + return self.get_object(op, params, Request) + + def attach_instances(self, name, instance_ids): + """ + Attach instances to an autoscaling group. + """ + params = { + 'AutoScalingGroupName': name, + } + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('AttachInstances', params) + + def detach_instances(self, name, instance_ids, decrement_capacity=True): + """ + Detach instances from an Auto Scaling group. + + :type name: str + :param name: The name of the Auto Scaling group from which to detach instances. + + :type instance_ids: list + :param instance_ids: Instance ids to be detached from the Auto Scaling group. + + :type decrement_capacity: bool + :param decrement_capacity: Whether to decrement the size of the + Auto Scaling group or not. + """ + + params = {'AutoScalingGroupName': name} + params['ShouldDecrementDesiredCapacity'] = 'true' if decrement_capacity else 'false' + + self.build_list_params(params, instance_ids, 'InstanceIds') + return self.get_status('DetachInstances', params) + + def create_auto_scaling_group(self, as_group): + """ + Create auto scaling group. + """ + return self._update_group('CreateAutoScalingGroup', as_group) + + def delete_auto_scaling_group(self, name, force_delete=False): + """ + Deletes the specified auto scaling group if the group has no instances + and no scaling activities in progress. 
+ """ + if(force_delete): + params = {'AutoScalingGroupName': name, 'ForceDelete': 'true'} + else: + params = {'AutoScalingGroupName': name} + return self.get_object('DeleteAutoScalingGroup', params, Request) + + def create_launch_configuration(self, launch_config): + """ + Creates a new Launch Configuration. + + :type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` + :param launch_config: LaunchConfiguration object. + """ + params = {'ImageId': launch_config.image_id, + 'LaunchConfigurationName': launch_config.name, + 'InstanceType': launch_config.instance_type} + if launch_config.key_name: + params['KeyName'] = launch_config.key_name + if launch_config.user_data: + user_data = launch_config.user_data + if isinstance(user_data, six.text_type): + user_data = user_data.encode('utf-8') + params['UserData'] = base64.b64encode(user_data).decode('utf-8') + if launch_config.kernel_id: + params['KernelId'] = launch_config.kernel_id + if launch_config.ramdisk_id: + params['RamdiskId'] = launch_config.ramdisk_id + if launch_config.block_device_mappings: + [x.autoscale_build_list_params(params) for x in launch_config.block_device_mappings] + if launch_config.security_groups: + self.build_list_params(params, launch_config.security_groups, + 'SecurityGroups') + if launch_config.instance_monitoring: + params['InstanceMonitoring.Enabled'] = 'true' + else: + params['InstanceMonitoring.Enabled'] = 'false' + if launch_config.spot_price is not None: + params['SpotPrice'] = str(launch_config.spot_price) + if launch_config.instance_profile_name is not None: + params['IamInstanceProfile'] = launch_config.instance_profile_name + if launch_config.ebs_optimized: + params['EbsOptimized'] = 'true' + else: + params['EbsOptimized'] = 'false' + if launch_config.associate_public_ip_address is True: + params['AssociatePublicIpAddress'] = 'true' + elif launch_config.associate_public_ip_address is False: + params['AssociatePublicIpAddress'] = 'false' + if launch_config.volume_type: + params['VolumeType'] = launch_config.volume_type + if launch_config.delete_on_termination: + params['DeleteOnTermination'] = 'true' + else: + params['DeleteOnTermination'] = 'false' + if launch_config.iops: + params['Iops'] = launch_config.iops + if launch_config.classic_link_vpc_id: + params['ClassicLinkVPCId'] = launch_config.classic_link_vpc_id + if launch_config.classic_link_vpc_security_groups: + self.build_list_params( + params, + launch_config.classic_link_vpc_security_groups, + 'ClassicLinkVPCSecurityGroups' + ) + return self.get_object('CreateLaunchConfiguration', params, + Request, verb='POST') + + def get_account_limits(self): + """ + Returns the limits for the Auto Scaling resources currently granted for + your AWS account. + """ + params = {} + return self.get_object('DescribeAccountLimits', params, AccountLimits) + + def create_scaling_policy(self, scaling_policy): + """ + Creates a new Scaling Policy. + + :type scaling_policy: :class:`boto.ec2.autoscale.policy.ScalingPolicy` + :param scaling_policy: ScalingPolicy object. 
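+
+        A minimal usage sketch, with hypothetical names and values::
+
+            from boto.ec2.autoscale import ScalingPolicy
+            policy = ScalingPolicy(name='scale-up',
+                                   adjustment_type='ChangeInCapacity',
+                                   as_name='my-group',
+                                   scaling_adjustment=2,
+                                   cooldown=300)
+            conn.create_scaling_policy(policy)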
+ """ + params = {'AdjustmentType': scaling_policy.adjustment_type, + 'AutoScalingGroupName': scaling_policy.as_name, + 'PolicyName': scaling_policy.name, + 'ScalingAdjustment': scaling_policy.scaling_adjustment} + + if scaling_policy.adjustment_type == "PercentChangeInCapacity" and \ + scaling_policy.min_adjustment_step is not None: + params['MinAdjustmentStep'] = scaling_policy.min_adjustment_step + + if scaling_policy.cooldown is not None: + params['Cooldown'] = scaling_policy.cooldown + + return self.get_object('PutScalingPolicy', params, Request) + + def delete_launch_configuration(self, launch_config_name): + """ + Deletes the specified LaunchConfiguration. + + The specified launch configuration must not be attached to an Auto + Scaling group. Once this call completes, the launch configuration is no + longer available for use. + """ + params = {'LaunchConfigurationName': launch_config_name} + return self.get_object('DeleteLaunchConfiguration', params, Request) + + def get_all_groups(self, names=None, max_records=None, next_token=None): + """ + Returns a full description of each Auto Scaling group in the given + list. This includes all Amazon EC2 instances that are members of the + group. If a list of names is not provided, the service returns the full + details of all Auto Scaling groups. + + This action supports pagination by returning a token if there are more + pages to retrieve. To get the next page, call this action again with + the returned token as the NextToken parameter. + + :type names: list + :param names: List of group names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of groups to return. + + :rtype: list + :returns: List of :class:`boto.ec2.autoscale.group.AutoScalingGroup` + instances. + """ + params = {} + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + if names: + self.build_list_params(params, names, 'AutoScalingGroupNames') + return self.get_list('DescribeAutoScalingGroups', params, + [('member', AutoScalingGroup)]) + + def get_all_launch_configurations(self, **kwargs): + """ + Returns a full description of the launch configurations given the + specified names. + + If no names are specified, then the full details of all launch + configurations are returned. + + :type names: list + :param names: List of configuration names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of configurations to return. + + :type next_token: str + :param next_token: If you have more results than can be returned + at once, pass in this parameter to page through all results. + + :rtype: list + :returns: List of + :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration` + instances. + """ + params = {} + max_records = kwargs.get('max_records', None) + names = kwargs.get('names', None) + if max_records is not None: + params['MaxRecords'] = max_records + if names: + self.build_list_params(params, names, 'LaunchConfigurationNames') + next_token = kwargs.get('next_token') + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeLaunchConfigurations', params, + [('member', LaunchConfiguration)]) + + def get_all_activities(self, autoscale_group, activity_ids=None, + max_records=None, next_token=None): + """ + Get all activities for the given autoscaling group. + + This action supports pagination by returning a token if there are more + pages to retrieve. 
To get the next page, call this action again with
+        the returned token as the NextToken parameter.
+
+        :type autoscale_group: str or
+            :class:`boto.ec2.autoscale.group.AutoScalingGroup` object
+        :param autoscale_group: The auto scaling group to get activities on.
+
+        :type max_records: int
+        :param max_records: Maximum number of activities to return.
+
+        :rtype: list
+        :returns: List of
+            :class:`boto.ec2.autoscale.activity.Activity` instances.
+        """
+        name = autoscale_group
+        if isinstance(autoscale_group, AutoScalingGroup):
+            name = autoscale_group.name
+        params = {'AutoScalingGroupName': name}
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        if activity_ids:
+            self.build_list_params(params, activity_ids, 'ActivityIds')
+        return self.get_list('DescribeScalingActivities',
+                             params, [('member', Activity)])
+
+    def get_termination_policies(self):
+        """Gets all valid termination policies.
+
+        These values can then be used as the termination_policies arg
+        when creating and updating autoscale groups.
+        """
+        return self.get_object('DescribeTerminationPolicyTypes',
+                               {}, TerminationPolicies)
+
+    def delete_scheduled_action(self, scheduled_action_name,
+                                autoscale_group=None):
+        """
+        Deletes a previously scheduled action.
+
+        :type scheduled_action_name: str
+        :param scheduled_action_name: The name of the action you want
+            to delete.
+
+        :type autoscale_group: str
+        :param autoscale_group: The name of the autoscale group.
+        """
+        params = {'ScheduledActionName': scheduled_action_name}
+        if autoscale_group:
+            params['AutoScalingGroupName'] = autoscale_group
+        return self.get_status('DeleteScheduledAction', params)
+
+    def terminate_instance(self, instance_id, decrement_capacity=True):
+        """
+        Terminates the specified instance. The desired group size can
+        also be adjusted.
+
+        :type instance_id: str
+        :param instance_id: The ID of the instance to be terminated.
+
+        :type decrement_capacity: bool
+        :param decrement_capacity: Whether to decrement the size of the
+            autoscaling group or not.
+        """
+        params = {'InstanceId': instance_id}
+        if decrement_capacity:
+            params['ShouldDecrementDesiredCapacity'] = 'true'
+        else:
+            params['ShouldDecrementDesiredCapacity'] = 'false'
+        return self.get_object('TerminateInstanceInAutoScalingGroup', params,
+                               Activity)
+
+    def delete_policy(self, policy_name, autoscale_group=None):
+        """
+        Delete a policy.
+
+        :type policy_name: str
+        :param policy_name: The name or ARN of the policy to delete.
+
+        :type autoscale_group: str
+        :param autoscale_group: The name of the autoscale group.
+        """
+        params = {'PolicyName': policy_name}
+        if autoscale_group:
+            params['AutoScalingGroupName'] = autoscale_group
+        return self.get_status('DeletePolicy', params)
+
+    def get_all_adjustment_types(self):
+        return self.get_list('DescribeAdjustmentTypes', {},
+                             [('member', AdjustmentType)])
+
+    def get_all_autoscaling_instances(self, instance_ids=None,
+                                      max_records=None, next_token=None):
+        """
+        Returns a description of each Auto Scaling instance in the instance_ids
+        list. If a list is not provided, the service returns the full details
+        of all instances up to a maximum of fifty.
+
+        This action supports pagination by returning a token if there are more
+        pages to retrieve. To get the next page, call this action again with
+        the returned token as the NextToken parameter.
+
+        :type instance_ids: list
+        :param instance_ids: List of Autoscaling Instance IDs which should be
+            searched for.
+ + :type max_records: int + :param max_records: Maximum number of results to return. + + :rtype: list + :returns: List of + :class:`boto.ec2.autoscale.instance.Instance` objects. + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceIds') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeAutoScalingInstances', + params, [('member', Instance)]) + + def get_all_metric_collection_types(self): + """ + Returns a list of metrics and a corresponding list of granularities + for each metric. + """ + return self.get_object('DescribeMetricCollectionTypes', + {}, MetricCollectionTypes) + + def get_all_policies(self, as_group=None, policy_names=None, + max_records=None, next_token=None): + """ + Returns descriptions of what each policy does. This action supports + pagination. If the response includes a token, there are more records + available. To get the additional records, repeat the request with the + response token as the NextToken parameter. + + If no group name or list of policy names are provided, all + available policies are returned. + + :type as_group: str + :param as_group: The name of the + :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for. + + :type policy_names: list + :param policy_names: List of policy names which should be searched for. + + :type max_records: int + :param max_records: Maximum amount of groups to return. + + :type next_token: str + :param next_token: If you have more results than can be returned + at once, pass in this parameter to page through all results. + """ + params = {} + if as_group: + params['AutoScalingGroupName'] = as_group + if policy_names: + self.build_list_params(params, policy_names, 'PolicyNames') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribePolicies', params, + [('member', ScalingPolicy)]) + + def get_all_scaling_process_types(self): + """ + Returns scaling process types for use in the ResumeProcesses and + SuspendProcesses actions. + """ + return self.get_list('DescribeScalingProcessTypes', {}, + [('member', ProcessType)]) + + def suspend_processes(self, as_group, scaling_processes=None): + """ + Suspends Auto Scaling processes for an Auto Scaling group. + + :type as_group: string + :param as_group: The auto scaling group to suspend processes on. + + :type scaling_processes: list + :param scaling_processes: Processes you want to suspend. If omitted, + all processes will be suspended. + """ + params = {'AutoScalingGroupName': as_group} + if scaling_processes: + self.build_list_params(params, scaling_processes, + 'ScalingProcesses') + return self.get_status('SuspendProcesses', params) + + def resume_processes(self, as_group, scaling_processes=None): + """ + Resumes Auto Scaling processes for an Auto Scaling group. + + :type as_group: string + :param as_group: The auto scaling group to resume processes on. + + :type scaling_processes: list + :param scaling_processes: Processes you want to resume. If omitted, all + processes will be resumed. 
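+
+        For example (connection, group, and process names hypothetical)::
+
+            conn.resume_processes('my-group', ['Launch', 'Terminate'])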
+        """
+        params = {'AutoScalingGroupName': as_group}
+
+        if scaling_processes:
+            self.build_list_params(params, scaling_processes,
+                                   'ScalingProcesses')
+        return self.get_status('ResumeProcesses', params)
+
+    def create_scheduled_group_action(self, as_group, name, time=None,
+                                      desired_capacity=None,
+                                      min_size=None, max_size=None,
+                                      start_time=None, end_time=None,
+                                      recurrence=None):
+        """
+        Creates a scheduled scaling action for an Auto Scaling group. If you
+        leave a parameter unspecified, the corresponding value remains
+        unchanged in the affected Auto Scaling group.
+
+        :type as_group: string
+        :param as_group: The auto scaling group to schedule the action for.
+
+        :type name: string
+        :param name: Scheduled action name.
+
+        :type time: datetime.datetime
+        :param time: The time for this action to start. (Deprecated)
+
+        :type desired_capacity: int
+        :param desired_capacity: The number of EC2 instances that should
+            be running in this group.
+
+        :type min_size: int
+        :param min_size: The minimum size for the new auto scaling group.
+
+        :type max_size: int
+        :param max_size: The maximum size for the new auto scaling group.
+
+        :type start_time: datetime.datetime
+        :param start_time: The time for this action to start. When StartTime
+            and EndTime are specified with Recurrence, they form the
+            boundaries of when the recurring action will start and stop.
+
+        :type end_time: datetime.datetime
+        :param end_time: The time for this action to end. When StartTime and
+            EndTime are specified with Recurrence, they form the boundaries
+            of when the recurring action will start and stop.
+
+        :type recurrence: string
+        :param recurrence: The time when recurring future actions will start,
+            specified using the Unix cron syntax format.
+            EXAMPLE: '0 10 * * *'
+        """
+        params = {'AutoScalingGroupName': as_group,
+                  'ScheduledActionName': name}
+        if start_time is not None:
+            params['StartTime'] = start_time.isoformat()
+        if end_time is not None:
+            params['EndTime'] = end_time.isoformat()
+        if recurrence is not None:
+            params['Recurrence'] = recurrence
+        if time:
+            params['Time'] = time.isoformat()
+        if desired_capacity is not None:
+            params['DesiredCapacity'] = desired_capacity
+        if min_size is not None:
+            params['MinSize'] = min_size
+        if max_size is not None:
+            params['MaxSize'] = max_size
+        return self.get_status('PutScheduledUpdateGroupAction', params)
+
+    def get_all_scheduled_actions(self, as_group=None, start_time=None,
+                                  end_time=None, scheduled_actions=None,
+                                  max_records=None, next_token=None):
+        params = {}
+        if as_group:
+            params['AutoScalingGroupName'] = as_group
+        if scheduled_actions:
+            self.build_list_params(params, scheduled_actions,
+                                   'ScheduledActionNames')
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('DescribeScheduledActions', params,
+                             [('member', ScheduledUpdateGroupAction)])
+
+    def disable_metrics_collection(self, as_group, metrics=None):
+        """
+        Disables monitoring of group metrics for the Auto Scaling group
+        specified in AutoScalingGroupName. You can specify the list of affected
+        metrics with the Metrics parameter.
+        """
+        params = {'AutoScalingGroupName': as_group}
+
+        if metrics:
+            self.build_list_params(params, metrics, 'Metrics')
+        return self.get_status('DisableMetricsCollection', params)
+
+    def enable_metrics_collection(self, as_group, granularity, metrics=None):
+        """
+        Enables monitoring of group metrics for the Auto Scaling group
+        specified in AutoScalingGroupName.
You can specify the list of enabled + metrics with the Metrics parameter. + + Auto scaling metrics collection can be turned on only if the + InstanceMonitoring.Enabled flag, in the Auto Scaling group's launch + configuration, is set to true. + + :type autoscale_group: string + :param autoscale_group: The auto scaling group to get activities on. + + :type granularity: string + :param granularity: The granularity to associate with the metrics to + collect. Currently, the only legal granularity is "1Minute". + + :type metrics: string list + :param metrics: The list of metrics to collect. If no metrics are + specified, all metrics are enabled. + """ + params = {'AutoScalingGroupName': as_group, + 'Granularity': granularity} + if metrics: + self.build_list_params(params, metrics, 'Metrics') + return self.get_status('EnableMetricsCollection', params) + + def execute_policy(self, policy_name, as_group=None, honor_cooldown=None): + params = {'PolicyName': policy_name} + if as_group: + params['AutoScalingGroupName'] = as_group + if honor_cooldown: + params['HonorCooldown'] = honor_cooldown + return self.get_status('ExecutePolicy', params) + + def put_notification_configuration(self, autoscale_group, topic, notification_types): + """ + Configures an Auto Scaling group to send notifications when + specified events take place. + + :type autoscale_group: str or + :class:`boto.ec2.autoscale.group.AutoScalingGroup` object + :param autoscale_group: The Auto Scaling group to put notification + configuration on. + + :type topic: str + :param topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (SNS) topic. + + :type notification_types: list + :param notification_types: The type of events that will trigger + the notification. Valid types are: + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', + 'autoscaling:TEST_NOTIFICATION' + """ + + name = autoscale_group + if isinstance(autoscale_group, AutoScalingGroup): + name = autoscale_group.name + + params = {'AutoScalingGroupName': name, + 'TopicARN': topic} + self.build_list_params(params, notification_types, 'NotificationTypes') + return self.get_status('PutNotificationConfiguration', params) + + def delete_notification_configuration(self, autoscale_group, topic): + """ + Deletes notifications created by put_notification_configuration. + + :type autoscale_group: str or + :class:`boto.ec2.autoscale.group.AutoScalingGroup` object + :param autoscale_group: The Auto Scaling group to put notification + configuration on. + + :type topic: str + :param topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (SNS) topic. + """ + + name = autoscale_group + if isinstance(autoscale_group, AutoScalingGroup): + name = autoscale_group.name + + params = {'AutoScalingGroupName': name, + 'TopicARN': topic} + + return self.get_status('DeleteNotificationConfiguration', params) + + def set_instance_health(self, instance_id, health_status, + should_respect_grace_period=True): + """ + Explicitly set the health status of an instance. + + :type instance_id: str + :param instance_id: The identifier of the EC2 instance. + + :type health_status: str + :param health_status: The health status of the instance. + "Healthy" means that the instance is healthy and should remain + in service. "Unhealthy" means that the instance is unhealthy. + Auto Scaling should terminate and replace it. 
+ + :type should_respect_grace_period: bool + :param should_respect_grace_period: If True, this call should + respect the grace period associated with the group. + """ + params = {'InstanceId': instance_id, + 'HealthStatus': health_status} + if should_respect_grace_period: + params['ShouldRespectGracePeriod'] = 'true' + else: + params['ShouldRespectGracePeriod'] = 'false' + return self.get_status('SetInstanceHealth', params) + + def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False): + """ + Adjusts the desired size of the AutoScalingGroup by initiating scaling + activities. When reducing the size of the group, it is not possible to define + which Amazon EC2 instances will be terminated. This applies to any Auto Scaling + decisions that might result in terminating instances. + + :type group_name: string + :param group_name: name of the auto scaling group + + :type desired_capacity: integer + :param desired_capacity: new capacity setting for auto scaling group + + :type honor_cooldown: boolean + :param honor_cooldown: by default, overrides any cooldown period + """ + params = {'AutoScalingGroupName': group_name, + 'DesiredCapacity': desired_capacity} + if honor_cooldown: + params['HonorCooldown'] = 'true' + + return self.get_status('SetDesiredCapacity', params) + + # Tag methods + + def get_all_tags(self, filters=None, max_records=None, next_token=None): + """ + Lists the Auto Scaling group tags. + + This action supports pagination by returning a token if there + are more pages to retrieve. To get the next page, call this + action again with the returned token as the NextToken + parameter. + + :type filters: dict + :param filters: The value of the filter type used to identify + the tags to be returned. NOT IMPLEMENTED YET. + + :type max_records: int + :param max_records: Maximum number of tags to return. + + :rtype: list + :returns: List of :class:`boto.ec2.autoscale.tag.Tag` + instances. + """ + params = {} + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeTags', params, + [('member', Tag)]) + + def create_or_update_tags(self, tags): + """ + Creates new tags or updates existing tags for an Auto Scaling group. + + :type tags: List of :class:`boto.ec2.autoscale.tag.Tag` + :param tags: The new or updated tags. + """ + params = {} + for i, tag in enumerate(tags): + tag.build_params(params, i + 1) + return self.get_status('CreateOrUpdateTags', params, verb='POST') + + def delete_tags(self, tags): + """ + Deletes existing tags for an Auto Scaling group. + + :type tags: List of :class:`boto.ec2.autoscale.tag.Tag` + :param tags: The new or updated tags. 
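+
+        A short sketch, with a hypothetical tag and connection::
+
+            from boto.ec2.autoscale import Tag
+            tag = Tag(key='env', value='prod', resource_id='my-group')
+            conn.delete_tags([tag])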
+ """ + params = {} + for i, tag in enumerate(tags): + tag.build_params(params, i + 1) + return self.get_status('DeleteTags', params, verb='POST') diff --git a/ext/boto/ec2/autoscale/activity.py b/ext/boto/ec2/autoscale/activity.py new file mode 100644 index 0000000000..bfe32f436d --- /dev/null +++ b/ext/boto/ec2/autoscale/activity.py @@ -0,0 +1,73 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from datetime import datetime + + +class Activity(object): + def __init__(self, connection=None): + self.connection = connection + self.start_time = None + self.end_time = None + self.activity_id = None + self.progress = None + self.status_code = None + self.cause = None + self.description = None + self.status_message = None + self.group_name = None + + def __repr__(self): + return 'Activity<%s>: For group:%s, progress:%s, cause:%s' % (self.activity_id, + self.group_name, + self.status_message, + self.cause) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ActivityId': + self.activity_id = value + elif name == 'AutoScalingGroupName': + self.group_name = value + elif name == 'StartTime': + try: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'EndTime': + try: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'Progress': + self.progress = value + elif name == 'Cause': + self.cause = value + elif name == 'Description': + self.description = value + elif name == 'StatusMessage': + self.status_message = value + elif name == 'StatusCode': + self.status_code = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/autoscale/group.py b/ext/boto/ec2/autoscale/group.py new file mode 100644 index 0000000000..c3c041275d --- /dev/null +++ b/ext/boto/ec2/autoscale/group.py @@ -0,0 +1,361 @@ +# Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# 
persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.elb.listelement import ListElement +from boto.resultset import ResultSet +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.request import Request +from boto.ec2.autoscale.instance import Instance +from boto.ec2.autoscale.tag import Tag + + +class ProcessType(object): + def __init__(self, connection=None): + self.connection = connection + self.process_name = None + + def __repr__(self): + return 'ProcessType(%s)' % self.process_name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'ProcessName': + self.process_name = value + + +class SuspendedProcess(object): + def __init__(self, connection=None): + self.connection = connection + self.process_name = None + self.reason = None + + def __repr__(self): + return 'SuspendedProcess(%s, %s)' % (self.process_name, self.reason) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'ProcessName': + self.process_name = value + elif name == 'SuspensionReason': + self.reason = value + + +class EnabledMetric(object): + def __init__(self, connection=None, metric=None, granularity=None): + self.connection = connection + self.metric = metric + self.granularity = granularity + + def __repr__(self): + return 'EnabledMetric(%s, %s)' % (self.metric, self.granularity) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Granularity': + self.granularity = value + elif name == 'Metric': + self.metric = value + + +class TerminationPolicies(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) + + +class AutoScalingGroup(object): + def __init__(self, connection=None, name=None, + launch_config=None, availability_zones=None, + load_balancers=None, default_cooldown=None, + health_check_type=None, health_check_period=None, + placement_group=None, vpc_zone_identifier=None, + desired_capacity=None, min_size=None, max_size=None, + tags=None, termination_policies=None, instance_id=None, + **kwargs): + """ + Creates a new AutoScalingGroup with the specified name. + + You must not have already used up your entire quota of + AutoScalingGroups in order for this call to be successful. Once the + creation request is completed, the AutoScalingGroup is ready to be + used in other calls. + + :type name: str + :param name: Name of autoscaling group (required). + + :type availability_zones: list + :param availability_zones: List of availability zones (required). + + :type default_cooldown: int + :param default_cooldown: Number of seconds after a Scaling Activity + completes before any further scaling activities can start. 
+ + :type desired_capacity: int + :param desired_capacity: The desired capacity for the group. + + :type health_check_period: str + :param health_check_period: Length of time in seconds after a new + EC2 instance comes into service that Auto Scaling starts + checking its health. + + :type health_check_type: str + :param health_check_type: The service you want the health status from, + Amazon EC2 or Elastic Load Balancer. + + :type launch_config: str or LaunchConfiguration + :param launch_config: Name of launch configuration (required). + + :type load_balancers: list + :param load_balancers: List of load balancers. + + :type max_size: int + :param max_size: Maximum size of group (required). + + :type min_size: int + :param min_size: Minimum size of group (required). + + :type placement_group: str + :param placement_group: Physical location of your cluster placement + group created in Amazon EC2. + + :type vpc_zone_identifier: str or list + :param vpc_zone_identifier: A comma-separated string or python list of + the subnet identifiers of the Virtual Private Cloud. + + :type tags: list + :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s + + :type termination_policies: list + :param termination_policies: A list of termination policies. Valid values + are: "OldestInstance", "NewestInstance", "OldestLaunchConfiguration", + "ClosestToNextInstanceHour", "Default". If no value is specified, + the "Default" value is used. + + :type instance_id: str + :param instance_id: The ID of the Amazon EC2 instance you want to use + to create the Auto Scaling group. + + :rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup` + :return: An autoscale group. + """ + self.name = name or kwargs.get('group_name') # backwards compat + self.connection = connection + self.min_size = int(min_size) if min_size is not None else None + self.max_size = int(max_size) if max_size is not None else None + self.created_time = None + # backwards compatibility + default_cooldown = default_cooldown or kwargs.get('cooldown') + if default_cooldown is not None: + default_cooldown = int(default_cooldown) + self.default_cooldown = default_cooldown + self.launch_config_name = launch_config + if launch_config and isinstance(launch_config, LaunchConfiguration): + self.launch_config_name = launch_config.name + self.desired_capacity = desired_capacity + lbs = load_balancers or [] + self.load_balancers = ListElement(lbs) + zones = availability_zones or [] + self.availability_zones = ListElement(zones) + self.health_check_period = health_check_period + self.health_check_type = health_check_type + self.placement_group = placement_group + self.autoscaling_group_arn = None + if type(vpc_zone_identifier) is list: + vpc_zone_identifier = ','.join(vpc_zone_identifier) + self.vpc_zone_identifier = vpc_zone_identifier + self.instances = None + self.tags = tags or None + termination_policies = termination_policies or [] + self.termination_policies = ListElement(termination_policies) + self.instance_id = instance_id + + # backwards compatible access to 'cooldown' param + def _get_cooldown(self): + return self.default_cooldown + + def _set_cooldown(self, val): + self.default_cooldown = val + + cooldown = property(_get_cooldown, _set_cooldown) + + def __repr__(self): + return 'AutoScaleGroup<%s>' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Instances': + self.instances = ResultSet([('member', Instance)]) + return self.instances + elif name == 'LoadBalancerNames': + return self.load_balancers + elif name == 
'AvailabilityZones': + return self.availability_zones + elif name == 'EnabledMetrics': + self.enabled_metrics = ResultSet([('member', EnabledMetric)]) + return self.enabled_metrics + elif name == 'SuspendedProcesses': + self.suspended_processes = ResultSet([('member', SuspendedProcess)]) + return self.suspended_processes + elif name == 'Tags': + self.tags = ResultSet([('member', Tag)]) + return self.tags + elif name == 'TerminationPolicies': + return self.termination_policies + else: + return + + def endElement(self, name, value, connection): + if name == 'MinSize': + self.min_size = int(value) + elif name == 'AutoScalingGroupARN': + self.autoscaling_group_arn = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'DefaultCooldown': + self.default_cooldown = int(value) + elif name == 'LaunchConfigurationName': + self.launch_config_name = value + elif name == 'DesiredCapacity': + self.desired_capacity = int(value) + elif name == 'MaxSize': + self.max_size = int(value) + elif name == 'AutoScalingGroupName': + self.name = value + elif name == 'PlacementGroup': + self.placement_group = value + elif name == 'HealthCheckGracePeriod': + try: + self.health_check_period = int(value) + except ValueError: + self.health_check_period = None + elif name == 'HealthCheckType': + self.health_check_type = value + elif name == 'VPCZoneIdentifier': + self.vpc_zone_identifier = value + elif name == 'InstanceId': + self.instance_id = value + else: + setattr(self, name, value) + + def set_capacity(self, capacity): + """ + Set the desired capacity for the group. + """ + params = {'AutoScalingGroupName': self.name, + 'DesiredCapacity': capacity} + req = self.connection.get_object('SetDesiredCapacity', params, + Request) + self.connection.last_request = req + return req + + def update(self): + """ + Sync local changes with AutoScaling group. + """ + return self.connection._update_group('UpdateAutoScalingGroup', self) + + def shutdown_instances(self): + """ + Convenience method which shuts down all instances associated with + this group. + """ + self.min_size = 0 + self.max_size = 0 + self.desired_capacity = 0 + self.update() + + def delete(self, force_delete=False): + """ + Delete this auto-scaling group if no instances attached or no + scaling activities in progress. + """ + return self.connection.delete_auto_scaling_group(self.name, + force_delete) + + def get_activities(self, activity_ids=None, max_records=50): + """ + Get all activies for this group. + """ + return self.connection.get_all_activities(self, activity_ids, + max_records) + + def put_notification_configuration(self, topic, notification_types): + """ + Configures an Auto Scaling group to send notifications when + specified events take place. Valid notification types are: + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', + 'autoscaling:TEST_NOTIFICATION' + """ + return self.connection.put_notification_configuration(self, + topic, + notification_types) + + def delete_notification_configuration(self, topic): + """ + Deletes notifications created by put_notification_configuration. + """ + return self.connection.delete_notification_configuration(self, topic) + + def suspend_processes(self, scaling_processes=None): + """ + Suspends Auto Scaling processes for an Auto Scaling group. 
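+
+        Delegates to the connection object; for example (process name
+        hypothetical)::
+
+            group.suspend_processes(['ReplaceUnhealthy'])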
+        """
+        return self.connection.suspend_processes(self.name, scaling_processes)
+
+    def resume_processes(self, scaling_processes=None):
+        """
+        Resumes Auto Scaling processes for an Auto Scaling group.
+        """
+        return self.connection.resume_processes(self.name, scaling_processes)
+
+
+class AutoScalingGroupMetric(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.metric = None
+        self.granularity = None
+
+    def __repr__(self):
+        return 'AutoScalingGroupMetric:%s' % self.metric
+
+    def startElement(self, name, attrs, connection):
+        return
+
+    def endElement(self, name, value, connection):
+        if name == 'Metric':
+            self.metric = value
+        elif name == 'Granularity':
+            self.granularity = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/ec2/autoscale/instance.py b/ext/boto/ec2/autoscale/instance.py
new file mode 100644
index 0000000000..6095c17be5
--- /dev/null
+++ b/ext/boto/ec2/autoscale/instance.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Instance(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.instance_id = None
+        self.health_status = None
+        self.launch_config_name = None
+        self.lifecycle_state = None
+        self.availability_zone = None
+        self.group_name = None
+
+    def __repr__(self):
+        r = 'Instance<id:%s, state:%s, health:%s' % (self.instance_id,
+                                                     self.lifecycle_state,
+                                                     self.health_status)
+        if self.group_name:
+            r += ' group:%s' % self.group_name
+        r += '>'
+        return r
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'InstanceId':
+            self.instance_id = value
+        elif name == 'HealthStatus':
+            self.health_status = value
+        elif name == 'LaunchConfigurationName':
+            self.launch_config_name = value
+        elif name == 'LifecycleState':
+            self.lifecycle_state = value
+        elif name == 'AvailabilityZone':
+            self.availability_zone = value
+        elif name == 'AutoScalingGroupName':
+            self.group_name = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/ec2/autoscale/policy.py b/ext/boto/ec2/autoscale/policy.py
new file mode 100644
--- /dev/null
+++ b/ext/boto/ec2/autoscale/policy.py
+from boto.resultset import ResultSet
+
+
+class Alarm(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.name = None
+        self.alarm_arn = None
+
+    def __repr__(self):
+        return 'Alarm:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'AlarmName':
+            self.name = value
+        elif name == 'AlarmARN':
+            self.alarm_arn = value
+        else:
+            setattr(self, name, value)
+
+
+class AdjustmentType(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.adjustment_type = None
+
+    def __repr__(self):
+        return 'AdjustmentType:%s' % self.adjustment_type
+
+    def startElement(self, name, attrs, connection):
+        return
+
+    def endElement(self, name, value, connection):
+        if name == 'AdjustmentType':
+            self.adjustment_type = value
+        return
+
+
+class MetricCollectionTypes(object):
+    class BaseType(object):
+        arg = ''
+
+        def __init__(self, connection):
+            self.connection = connection
+            self.val = None
+
+        def __repr__(self):
+            return '%s:%s' % (self.arg, self.val)
+
+        def startElement(self, name, attrs, connection):
+            return
+
+        def endElement(self, name, value, connection):
+            if name == self.arg:
+                self.val = value
+
+    class Metric(BaseType):
+        arg = 'Metric'
+
+    class Granularity(BaseType):
+        arg = 'Granularity'
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.metrics = []
+        self.granularities = []
+
+    def __repr__(self):
+        return 'MetricCollectionTypes:<%s, %s>' % (self.metrics,
+                                                   self.granularities)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Granularities':
+            self.granularities = ResultSet([('member', self.Granularity)])
+            return self.granularities
+        elif name == 'Metrics':
+            self.metrics = ResultSet([('member', self.Metric)])
+            return self.metrics
+
+    def endElement(self, name, value, connection):
+        return
+
+
+class ScalingPolicy(object):
+    def __init__(self, connection=None, **kwargs):
+        """
+        Scaling Policy
+
+        :type name: str
+        :param name: Name of scaling policy.
+
+        :type adjustment_type: str
+        :param adjustment_type: Specifies the type of adjustment. Valid
+            values are `ChangeInCapacity`, `ExactCapacity` and
+            `PercentChangeInCapacity`.
+
+        :type as_name: str
+        :param as_name: Name or ARN of the Auto Scaling Group.
+
+        :type scaling_adjustment: int
+        :param scaling_adjustment: Value of adjustment (type specified in
+            `adjustment_type`).
+
+        :type min_adjustment_step: int
+        :param min_adjustment_step: Value of min adjustment step required to
+            apply the scaling policy (only makes sense when using
+            `PercentChangeInCapacity` as adjustment_type).
+ + :type cooldown: int + :param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends. + + """ + self.name = kwargs.get('name', None) + self.adjustment_type = kwargs.get('adjustment_type', None) + self.as_name = kwargs.get('as_name', None) + self.scaling_adjustment = kwargs.get('scaling_adjustment', None) + self.cooldown = kwargs.get('cooldown', None) + self.connection = connection + self.min_adjustment_step = kwargs.get('min_adjustment_step', None) + + def __repr__(self): + return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name, + self.as_name, + self.adjustment_type) + + def startElement(self, name, attrs, connection): + if name == 'Alarms': + self.alarms = ResultSet([('member', Alarm)]) + return self.alarms + + def endElement(self, name, value, connection): + if name == 'PolicyName': + self.name = value + elif name == 'AutoScalingGroupName': + self.as_name = value + elif name == 'PolicyARN': + self.policy_arn = value + elif name == 'ScalingAdjustment': + self.scaling_adjustment = int(value) + elif name == 'Cooldown': + self.cooldown = int(value) + elif name == 'AdjustmentType': + self.adjustment_type = value + elif name == 'MinAdjustmentStep': + self.min_adjustment_step = int(value) + + def delete(self): + return self.connection.delete_policy(self.name, self.as_name) + + +class TerminationPolicies(list): + def __init__(self, connection=None, **kwargs): + pass + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/ext/boto/ec2/autoscale/request.py b/ext/boto/ec2/autoscale/request.py new file mode 100644 index 0000000000..b17b534fed --- /dev/null +++ b/ext/boto/ec2/autoscale/request.py @@ -0,0 +1,38 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
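+
+# Note: ``Request`` is the minimal result object used by most mutating
+# calls on ``AutoScaleConnection``; typically only the ``RequestId`` of
+# the response is parsed into it. For example (hypothetical group):
+#
+#     req = conn.create_auto_scaling_group(group)
+#     print(req.request_id)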
+ + +class Request(object): + def __init__(self, connection=None): + self.connection = connection + self.request_id = '' + + def __repr__(self): + return 'Request:%s' % self.request_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'RequestId': + self.request_id = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/autoscale/scheduled.py b/ext/boto/ec2/autoscale/scheduled.py new file mode 100644 index 0000000000..8d2eda407d --- /dev/null +++ b/ext/boto/ec2/autoscale/scheduled.py @@ -0,0 +1,77 @@ +# Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from datetime import datetime + + +class ScheduledUpdateGroupAction(object): + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.action_arn = None + self.as_group = None + self.time = None + self.start_time = None + self.end_time = None + self.recurrence = None + self.desired_capacity = None + self.max_size = None + self.min_size = None + + def __repr__(self): + return 'ScheduledUpdateGroupAction:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DesiredCapacity': + self.desired_capacity = value + elif name == 'ScheduledActionName': + self.name = value + elif name == 'AutoScalingGroupName': + self.as_group = value + elif name == 'MaxSize': + self.max_size = int(value) + elif name == 'MinSize': + self.min_size = int(value) + elif name == 'ScheduledActionARN': + self.action_arn = value + elif name == 'Recurrence': + self.recurrence = value + elif name == 'Time': + try: + self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'StartTime': + try: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.start_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name == 'EndTime': + try: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ') + except ValueError: + self.end_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/autoscale/tag.py b/ext/boto/ec2/autoscale/tag.py new file mode 100644 index 0000000000..a783edf096 --- /dev/null +++ b/ext/boto/ec2/autoscale/tag.py @@ -0,0 +1,84 @@ +# Copyright (c) 2012 Mitch Garnaat 
http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Tag(object): + """ + A name/value tag on an AutoScalingGroup resource. + + :ivar key: The key of the tag. + :ivar value: The value of the tag. + :ivar propagate_at_launch: Boolean value which specifies whether the + new tag will be applied to instances launched after the tag is created. + :ivar resource_id: The name of the autoscaling group. + :ivar resource_type: The only supported resource type at this time + is "auto-scaling-group". + """ + + def __init__(self, connection=None, key=None, value=None, + propagate_at_launch=False, resource_id=None, + resource_type='auto-scaling-group'): + self.connection = connection + self.key = key + self.value = value + self.propagate_at_launch = propagate_at_launch + self.resource_id = resource_id + self.resource_type = resource_type + + def __repr__(self): + return 'Tag(%s=%s)' % (self.key, self.value) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'Value': + self.value = value + elif name == 'PropagateAtLaunch': + if value.lower() == 'true': + self.propagate_at_launch = True + else: + self.propagate_at_launch = False + elif name == 'ResourceId': + self.resource_id = value + elif name == 'ResourceType': + self.resource_type = value + + def build_params(self, params, i): + """ + Populates a dictionary with the name/value pairs necessary + to identify this Tag in a request. + """ + prefix = 'Tags.member.%d.' % i + params[prefix + 'ResourceId'] = self.resource_id + params[prefix + 'ResourceType'] = self.resource_type + params[prefix + 'Key'] = self.key + params[prefix + 'Value'] = self.value + if self.propagate_at_launch: + params[prefix + 'PropagateAtLaunch'] = 'true' + else: + params[prefix + 'PropagateAtLaunch'] = 'false' + + def delete(self): + return self.connection.delete_tags([self]) diff --git a/ext/boto/ec2/blockdevicemapping.py b/ext/boto/ec2/blockdevicemapping.py new file mode 100644 index 0000000000..2f4e1faf4b --- /dev/null +++ b/ext/boto/ec2/blockdevicemapping.py @@ -0,0 +1,165 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class BlockDeviceType(object): + """ + Represents parameters for a block device. + """ + + def __init__(self, + connection=None, + ephemeral_name=None, + no_device=False, + volume_id=None, + snapshot_id=None, + status=None, + attach_time=None, + delete_on_termination=False, + size=None, + volume_type=None, + iops=None, + encrypted=None): + self.connection = connection + self.ephemeral_name = ephemeral_name + self.no_device = no_device + self.volume_id = volume_id + self.snapshot_id = snapshot_id + self.status = status + self.attach_time = attach_time + self.delete_on_termination = delete_on_termination + self.size = size + self.volume_type = volume_type + self.iops = iops + self.encrypted = encrypted + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + lname = name.lower() + if name == 'volumeId': + self.volume_id = value + elif lname == 'virtualname': + self.ephemeral_name = value + elif lname == 'nodevice': + self.no_device = (value == 'true') + elif lname == 'snapshotid': + self.snapshot_id = value + elif lname == 'volumesize': + self.size = int(value) + elif lname == 'status': + self.status = value + elif lname == 'attachtime': + self.attach_time = value + elif lname == 'deleteontermination': + self.delete_on_termination = (value == 'true') + elif lname == 'volumetype': + self.volume_type = value + elif lname == 'iops': + self.iops = int(value) + elif lname == 'encrypted': + self.encrypted = (value == 'true') + else: + setattr(self, name, value) + +# for backwards compatibility +EBSBlockDeviceType = BlockDeviceType + + +class BlockDeviceMapping(dict): + """ + Represents a collection of BlockDeviceTypes when creating ec2 instances. + + Example: + dev_sda1 = BlockDeviceType() + dev_sda1.size = 100 # change root volume to 100GB instead of default + bdm = BlockDeviceMapping() + bdm['/dev/sda1'] = dev_sda1 + reservation = image.run(..., block_device_map=bdm, ...) + """ + + def __init__(self, connection=None): + """ + :type connection: :class:`boto.ec2.EC2Connection` + :param connection: Optional connection. 
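+
+        As a sketch of the wire format this class produces, a mapping with
+        one EBS volume passed through ``autoscale_build_list_params(params)``
+        would add keys of the form (device name hypothetical)::
+
+            BlockDeviceMappings.member.1.DeviceName = /dev/sda1
+            BlockDeviceMappings.member.1.Ebs.VolumeSize = 100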
+ """ + dict.__init__(self) + self.connection = connection + self.current_name = None + self.current_value = None + + def startElement(self, name, attrs, connection): + lname = name.lower() + if lname in ['ebs', 'virtualname']: + self.current_value = BlockDeviceType(self) + return self.current_value + + def endElement(self, name, value, connection): + lname = name.lower() + if lname in ['device', 'devicename']: + self.current_name = value + elif lname in ['item', 'member']: + self[self.current_name] = self.current_value + + def ec2_build_list_params(self, params, prefix=''): + pre = '%sBlockDeviceMapping' % prefix + return self._build_list_params(params, prefix=pre) + + def autoscale_build_list_params(self, params, prefix=''): + pre = '%sBlockDeviceMappings.member' % prefix + return self._build_list_params(params, prefix=pre) + + def _build_list_params(self, params, prefix=''): + i = 1 + for dev_name in self: + pre = '%s.%d' % (prefix, i) + params['%s.DeviceName' % pre] = dev_name + block_dev = self[dev_name] + if block_dev.ephemeral_name: + params['%s.VirtualName' % pre] = block_dev.ephemeral_name + else: + if block_dev.no_device: + params['%s.NoDevice' % pre] = '' + else: + if block_dev.snapshot_id: + params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id + if block_dev.size: + params['%s.Ebs.VolumeSize' % pre] = block_dev.size + if block_dev.delete_on_termination: + params['%s.Ebs.DeleteOnTermination' % pre] = 'true' + else: + params['%s.Ebs.DeleteOnTermination' % pre] = 'false' + if block_dev.volume_type: + params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type + if block_dev.iops is not None: + params['%s.Ebs.Iops' % pre] = block_dev.iops + # The encrypted flag (even if False) cannot be specified for the root EBS + # volume. + if block_dev.encrypted is not None: + if block_dev.encrypted: + params['%s.Ebs.Encrypted' % pre] = 'true' + else: + params['%s.Ebs.Encrypted' % pre] = 'false' + + i += 1 diff --git a/ext/boto/ec2/bundleinstance.py b/ext/boto/ec2/bundleinstance.py new file mode 100644 index 0000000000..e241da9adf --- /dev/null +++ b/ext/boto/ec2/bundleinstance.py @@ -0,0 +1,78 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an EC2 Bundle Task +""" + +from boto.ec2.ec2object import EC2Object + + +class BundleInstanceTask(EC2Object): + + def __init__(self, connection=None): + super(BundleInstanceTask, self).__init__(connection) + self.id = None + self.instance_id = None + self.progress = None + self.start_time = None + self.state = None + self.bucket = None + self.prefix = None + self.upload_policy = None + self.upload_policy_signature = None + self.update_time = None + self.code = None + self.message = None + + def __repr__(self): + return 'BundleInstanceTask:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'bundleId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'progress': + self.progress = value + elif name == 'startTime': + self.start_time = value + elif name == 'state': + self.state = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'uploadPolicy': + self.upload_policy = value + elif name == 'uploadPolicySignature': + self.upload_policy_signature = value + elif name == 'updateTime': + self.update_time = value + elif name == 'code': + self.code = value + elif name == 'message': + self.message = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/buyreservation.py b/ext/boto/ec2/buyreservation.py new file mode 100644 index 0000000000..786d0fed28 --- /dev/null +++ b/ext/boto/ec2/buyreservation.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
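+#
+# Interactive helper for buying EC2 Reserved Instance offerings.  Run as
+# a script, it prompts for region, instance type, availability zone and
+# quantity, lists the matching offerings, and asks for confirmation
+# before purchasing.  A sketch of a session (values illustrative):
+#
+#     $ python buyreservation.py
+#     ...
+#     Are you sure you want to do this? If so, enter YES: YES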
+import boto.ec2 +from boto.sdb.db.property import StringProperty, IntegerProperty +from boto.manage import propget +from boto.compat import six + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', + 'c1.medium', 'c1.xlarge', 'm2.xlarge', + 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', + 't1.micro'] + + +class BuyReservation(object): + + def get_region(self, params): + if not params.get('region', None): + prop = StringProperty(name='region', verbose_name='EC2 Region', + choices=boto.ec2.regions) + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get(self, params): + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + +if __name__ == "__main__": + obj = BuyReservation() + params = {} + obj.get(params) + offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'], + availability_zone=params['zone'].name) + print('\nThe following Reserved Instances Offerings are available:\n') + for offering in offerings: + offering.describe() + prop = StringProperty(name='offering', verbose_name='Offering', + choices=offerings) + offering = propget.get(prop) + print('\nYou have chosen this offering:') + offering.describe() + unit_price = float(offering.fixed_price) + total_price = unit_price * params['quantity'] + print('!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)) + answer = six.moves.input('Are you sure you want to do this? If so, enter YES: ') + if answer.strip().lower() == 'yes': + offering.purchase(params['quantity']) + else: + print('Purchase cancelled') diff --git a/ext/boto/ec2/cloudwatch/__init__.py b/ext/boto/ec2/cloudwatch/__init__.py new file mode 100644 index 0000000000..b53d56957f --- /dev/null +++ b/ext/boto/ec2/cloudwatch/__init__.py @@ -0,0 +1,593 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +CloudWatch service from AWS. +""" +from boto.compat import json, map, six, zip +from boto.connection import AWSQueryConnection +from boto.ec2.cloudwatch.metric import Metric +from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem +from boto.ec2.cloudwatch.datapoint import Datapoint +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.regioninfo import connect +import boto + +RegionData = load_regions().get('cloudwatch', {}) + + +def regions(): + """ + Get all available regions for the CloudWatch service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('cloudwatch', connection_cls=CloudWatchConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.cloudwatch.CloudWatchConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.CloudWatchConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('cloudwatch', region_name, + connection_cls=CloudWatchConnection, **kw_params) + + +class CloudWatchConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01') + DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name', + 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', + 'cloudwatch_region_endpoint', + 'monitoring.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2 Monitoring Service. + + B{Note:} The host argument is overridden by the host specified in the + boto configuration file. 
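+
+        A minimal connection sketch (the region name is illustrative;
+        ``connect_to_region`` is defined above in this module)::
+
+            import boto.ec2.cloudwatch
+            conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')
+            metrics = conn.list_metrics(namespace='AWS/EC2')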
+ """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + + # Ugly hack to get around both a bug in Python and a + # misconfigured SSL cert for the eu-west-1 endpoint + if self.region.name == 'eu-west-1': + validate_certs = False + + super(CloudWatchConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_dimension_param(self, dimension, params): + prefix = 'Dimensions.member' + i = 0 + for dim_name in dimension: + dim_value = dimension[dim_name] + if dim_value: + if isinstance(dim_value, six.string_types): + dim_value = [dim_value] + for value in dim_value: + params['%s.%d.Name' % (prefix, i + 1)] = dim_name + params['%s.%d.Value' % (prefix, i + 1)] = value + i += 1 + else: + params['%s.%d.Name' % (prefix, i + 1)] = dim_name + i += 1 + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for index, item in enumerate(items): + i = index + 1 + if isinstance(item, dict): + for k, v in six.iteritems(item): + params[label % (i, 'Name')] = k + if v is not None: + params[label % (i, 'Value')] = v + else: + params[label % i] = item + + def build_put_params(self, params, name, value=None, timestamp=None, + unit=None, dimensions=None, statistics=None): + args = (name, value, unit, dimensions, statistics, timestamp) + length = max(map(lambda a: len(a) if isinstance(a, list) else 1, args)) + + def aslist(a): + if isinstance(a, list): + if len(a) != length: + raise Exception('Must specify equal number of elements; expected %d.' % length) + return a + return [a] * length + + for index, (n, v, u, d, s, t) in enumerate(zip(*map(aslist, args))): + metric_data = {'MetricName': n} + + if timestamp: + metric_data['Timestamp'] = t.isoformat() + + if unit: + metric_data['Unit'] = u + + if dimensions: + self.build_dimension_param(d, metric_data) + + if statistics: + metric_data['StatisticValues.Maximum'] = s['maximum'] + metric_data['StatisticValues.Minimum'] = s['minimum'] + metric_data['StatisticValues.SampleCount'] = s['samplecount'] + metric_data['StatisticValues.Sum'] = s['sum'] + if value is not None: + msg = 'You supplied a value and statistics for a ' + \ + 'metric.Posting statistics and not value.' + boto.log.warn(msg) + elif value is not None: + metric_data['Value'] = v + else: + raise Exception('Must specify a value or statistics to put.') + + for key, val in six.iteritems(metric_data): + params['MetricData.member.%d.%s' % (index + 1, key)] = val + + def get_metric_statistics(self, period, start_time, end_time, metric_name, + namespace, statistics, dimensions=None, + unit=None): + """ + Get time-series data for one or more statistics of a given metric. + + :type period: integer + :param period: The granularity, in seconds, of the returned datapoints. + Period must be at least 60 seconds and must be a multiple + of 60. The default value is 60. + + :type start_time: datetime + :param start_time: The time stamp to use for determining the + first datapoint to return. The value specified is + inclusive; results include datapoints with the time stamp + specified. + + :type end_time: datetime + :param end_time: The time stamp to use for determining the + last datapoint to return. 
The value specified is
+            exclusive; results will include datapoints up to the time
+            stamp specified.
+
+        :type metric_name: string
+        :param metric_name: The metric name.
+
+        :type namespace: string
+        :param namespace: The metric's namespace.
+
+        :type statistics: list
+        :param statistics: A list of statistic names. Valid values:
+            Average | Sum | SampleCount | Maximum | Minimum
+
+        :type dimensions: dict
+        :param dimensions: A dictionary of dimension key/values where
+            the key is the dimension name and the value
+            is either a scalar value or an iterator
+            of values to be associated with that
+            dimension.
+
+        :type unit: string
+        :param unit: The unit for the metric.  Valid values are:
+            Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
+            Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
+            Megabits | Gigabits | Terabits | Percent | Count |
+            Bytes/Second | Kilobytes/Second | Megabytes/Second |
+            Gigabytes/Second | Terabytes/Second | Bits/Second |
+            Kilobits/Second | Megabits/Second | Gigabits/Second |
+            Terabits/Second | Count/Second | None
+
+        :rtype: list
+        """
+        params = {'Period': period,
+                  'MetricName': metric_name,
+                  'Namespace': namespace,
+                  'StartTime': start_time.isoformat(),
+                  'EndTime': end_time.isoformat()}
+        self.build_list_params(params, statistics, 'Statistics.member.%d')
+        if dimensions:
+            self.build_dimension_param(dimensions, params)
+        if unit:
+            params['Unit'] = unit
+        return self.get_list('GetMetricStatistics', params,
+                             [('member', Datapoint)])
+
+    def list_metrics(self, next_token=None, dimensions=None,
+                     metric_name=None, namespace=None):
+        """
+        Returns a list of the valid metrics for which there is recorded
+        data available.
+
+        :type next_token: str
+        :param next_token: A maximum of 500 metrics will be returned
+            at one time.  If more results are available, the ResultSet
+            returned will contain a non-Null next_token attribute.
+            Passing that token as a parameter to list_metrics will
+            retrieve the next page of metrics.
+
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+            pairs that will be used to filter the results.  The key in
+            the dictionary is the name of a Dimension.  The value in
+            the dictionary is either a scalar value of that Dimension
+            name that you want to filter on or None if you want all
+            metrics with that Dimension name.  To be included in the
+            result a metric must contain all specified dimensions,
+            although the metric may contain additional dimensions beyond
+            the requested metrics.  The Dimension names and values must
+            be strings between 1 and 250 characters long.  A maximum of
+            10 dimensions are allowed.
+
+        :type metric_name: str
+        :param metric_name: The name of the Metric to filter against.  If None,
+            all Metric names will be returned.
+
+        :type namespace: str
+        :param namespace: A Metric namespace to filter against (e.g. AWS/EC2).
+            If None, Metrics from all namespaces will be returned.
+        """
+        params = {}
+        if next_token:
+            params['NextToken'] = next_token
+        if dimensions:
+            self.build_dimension_param(dimensions, params)
+        if metric_name:
+            params['MetricName'] = metric_name
+        if namespace:
+            params['Namespace'] = namespace
+
+        return self.get_list('ListMetrics', params, [('member', Metric)])
+
+    def put_metric_data(self, namespace, name, value=None, timestamp=None,
+                        unit=None, dimensions=None, statistics=None):
+        """
+        Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch
+        associates the data points with the specified metric.
If the specified + metric does not exist, Amazon CloudWatch creates the metric. If a list + is specified for some, but not all, of the arguments, the remaining + arguments are repeated a corresponding number of times. + + :type namespace: str + :param namespace: The namespace of the metric. + + :type name: str or list + :param name: The name of the metric. + + :type value: float or list + :param value: The value for the metric. + + :type timestamp: datetime or list + :param timestamp: The time stamp used for the metric. If not specified, + the default value is set to the time the metric data was received. + + :type unit: string or list + :param unit: The unit of the metric. Valid Values: Seconds | + Microseconds | Milliseconds | Bytes | Kilobytes | + Megabytes | Gigabytes | Terabytes | Bits | Kilobits | + Megabits | Gigabits | Terabits | Percent | Count | + Bytes/Second | Kilobytes/Second | Megabytes/Second | + Gigabytes/Second | Terabytes/Second | Bits/Second | + Kilobits/Second | Megabits/Second | Gigabits/Second | + Terabits/Second | Count/Second | None + + :type dimensions: dict + :param dimensions: Add extra name value pairs to associate + with the metric, i.e.: + {'name1': value1, 'name2': (value2, value3)} + + :type statistics: dict or list + :param statistics: Use a statistic set instead of a value, for example:: + + {'maximum': 30, 'minimum': 1, 'samplecount': 100, 'sum': 10000} + """ + params = {'Namespace': namespace} + self.build_put_params(params, name, value=value, timestamp=timestamp, + unit=unit, dimensions=dimensions, statistics=statistics) + + return self.get_status('PutMetricData', params, verb="POST") + + def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, + alarm_names=None, max_records=None, state_value=None, + next_token=None): + """ + Retrieves alarms with the specified names. If no name is specified, all + alarms for the user are returned. Alarms can be retrieved by using only + a prefix for the alarm name, the alarm state, or a prefix for any + action. + + :type action_prefix: string + :param action_prefix: The action name prefix. + + :type alarm_name_prefix: string + :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot + be specified if this parameter is specified. + + :type alarm_names: list + :param alarm_names: A list of alarm names to retrieve information for. + + :type max_records: int + :param max_records: The maximum number of alarm descriptions + to retrieve. + + :type state_value: string + :param state_value: The state value to be used in matching alarms. + + :type next_token: string + :param next_token: The token returned by a previous call to + indicate that there is more data. + + :rtype list + """ + params = {} + if action_prefix: + params['ActionPrefix'] = action_prefix + if alarm_name_prefix: + params['AlarmNamePrefix'] = alarm_name_prefix + elif alarm_names: + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + if max_records: + params['MaxRecords'] = max_records + if next_token: + params['NextToken'] = next_token + if state_value: + params['StateValue'] = state_value + + result = self.get_list('DescribeAlarms', params, + [('MetricAlarms', MetricAlarms)]) + ret = result[0] + ret.next_token = result.next_token + return ret + + def describe_alarm_history(self, alarm_name=None, + start_date=None, end_date=None, + max_records=None, history_item_type=None, + next_token=None): + """ + Retrieves history for the specified alarm. Filter alarms by date range + or item type. 
If an alarm name is not specified, Amazon CloudWatch
+        returns histories for all of the owner's alarms.
+
+        Amazon CloudWatch retains the history of deleted alarms for a period of
+        six weeks. If an alarm has been deleted, its history can still be
+        queried.
+
+        :type alarm_name: string
+        :param alarm_name: The name of the alarm.
+
+        :type start_date: datetime
+        :param start_date: The starting date to retrieve alarm history.
+
+        :type end_date: datetime
+        :param end_date: The ending date to retrieve alarm history.
+
+        :type history_item_type: string
+        :param history_item_type: The type of alarm histories to retrieve
+            (ConfigurationUpdate | StateUpdate | Action)
+
+        :type max_records: int
+        :param max_records: The maximum number of alarm descriptions
+            to retrieve.
+
+        :type next_token: string
+        :param next_token: The token returned by a previous call to indicate
+            that there is more data.
+
+        :rtype: list
+        """
+        params = {}
+        if alarm_name:
+            params['AlarmName'] = alarm_name
+        if start_date:
+            params['StartDate'] = start_date.isoformat()
+        if end_date:
+            params['EndDate'] = end_date.isoformat()
+        if history_item_type:
+            params['HistoryItemType'] = history_item_type
+        if max_records:
+            params['MaxRecords'] = max_records
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('DescribeAlarmHistory', params,
+                             [('member', AlarmHistoryItem)])
+
+    def describe_alarms_for_metric(self, metric_name, namespace, period=None,
+                                   statistic=None, dimensions=None, unit=None):
+        """
+        Retrieves all alarms for a single metric. Specify a statistic, period,
+        or unit to filter the set of alarms further.
+
+        :type metric_name: string
+        :param metric_name: The name of the metric.
+
+        :type namespace: string
+        :param namespace: The namespace of the metric.
+
+        :type period: int
+        :param period: The period in seconds over which the statistic
+            is applied.
+
+        :type statistic: string
+        :param statistic: The statistic for the metric.
+
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+            pairs that will be used to filter the results.  The key in
+            the dictionary is the name of a Dimension.  The value in
+            the dictionary is either a scalar value of that Dimension
+            name that you want to filter on, a list of values to
+            filter on or None if you want all metrics with that
+            Dimension name.
+
+        :type unit: string
+
+        :rtype: list
+        """
+        params = {'MetricName': metric_name,
+                  'Namespace': namespace}
+        if period:
+            params['Period'] = period
+        if statistic:
+            params['Statistic'] = statistic
+        if dimensions:
+            self.build_dimension_param(dimensions, params)
+        if unit:
+            params['Unit'] = unit
+        return self.get_list('DescribeAlarmsForMetric', params,
+                             [('member', MetricAlarm)])
+
+    def put_metric_alarm(self, alarm):
+        """
+        Creates or updates an alarm and associates it with the specified Amazon
+        CloudWatch metric. Optionally, this operation can associate one or more
+        Amazon Simple Notification Service resources with the alarm.
+
+        When this operation creates an alarm, the alarm state is immediately
+        set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
+        set appropriately. Any actions associated with the StateValue are then
+        executed.
+
+        When updating an existing alarm, its StateValue is left unchanged.
+
+        :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
+        :param alarm: MetricAlarm object.
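+
+        A sketch of typical usage (the alarm name, metric and threshold are
+        illustrative; ``conn`` is assumed to be a CloudWatchConnection)::
+
+            from boto.ec2.cloudwatch.alarm import MetricAlarm
+            alarm = MetricAlarm(name='cpu-high', metric='CPUUtilization',
+                                namespace='AWS/EC2', statistic='Average',
+                                comparison='>=', threshold=90.0,
+                                period=300, evaluation_periods=2)
+            conn.put_metric_alarm(alarm)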
+ """ + params = { + 'AlarmName': alarm.name, + 'MetricName': alarm.metric, + 'Namespace': alarm.namespace, + 'Statistic': alarm.statistic, + 'ComparisonOperator': alarm.comparison, + 'Threshold': alarm.threshold, + 'EvaluationPeriods': alarm.evaluation_periods, + 'Period': alarm.period, + } + if alarm.actions_enabled is not None: + params['ActionsEnabled'] = alarm.actions_enabled + if alarm.alarm_actions: + self.build_list_params(params, alarm.alarm_actions, + 'AlarmActions.member.%s') + if alarm.description: + params['AlarmDescription'] = alarm.description + if alarm.dimensions: + self.build_dimension_param(alarm.dimensions, params) + if alarm.insufficient_data_actions: + self.build_list_params(params, alarm.insufficient_data_actions, + 'InsufficientDataActions.member.%s') + if alarm.ok_actions: + self.build_list_params(params, alarm.ok_actions, + 'OKActions.member.%s') + if alarm.unit: + params['Unit'] = alarm.unit + alarm.connection = self + return self.get_status('PutMetricAlarm', params) + create_alarm = put_metric_alarm + update_alarm = put_metric_alarm + + def delete_alarms(self, alarms): + """ + Deletes all specified alarms. In the event of an error, no + alarms are deleted. + + :type alarms: list + :param alarms: List of alarm names. + """ + params = {} + self.build_list_params(params, alarms, 'AlarmNames.member.%s') + return self.get_status('DeleteAlarms', params) + + def set_alarm_state(self, alarm_name, state_reason, state_value, + state_reason_data=None): + """ + Temporarily sets the state of an alarm. When the updated StateValue + differs from the previous value, the action configured for the + appropriate state is invoked. This is not a permanent change. The next + periodic alarm check (in about a minute) will set the alarm to its + actual state. + + :type alarm_name: string + :param alarm_name: Descriptive name for alarm. + + :type state_reason: string + :param state_reason: Human readable reason. + + :type state_value: string + :param state_value: OK | ALARM | INSUFFICIENT_DATA + + :type state_reason_data: string + :param state_reason_data: Reason string (will be jsonified). + """ + params = {'AlarmName': alarm_name, + 'StateReason': state_reason, + 'StateValue': state_value} + if state_reason_data: + params['StateReasonData'] = json.dumps(state_reason_data) + + return self.get_status('SetAlarmState', params) + + def enable_alarm_actions(self, alarm_names): + """ + Enables actions for the specified alarms. + + :type alarms: list + :param alarms: List of alarm names. + """ + params = {} + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + return self.get_status('EnableAlarmActions', params) + + def disable_alarm_actions(self, alarm_names): + """ + Disables actions for the specified alarms. + + :type alarms: list + :param alarms: List of alarm names. 
+ """ + params = {} + self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') + return self.get_status('DisableAlarmActions', params) diff --git a/ext/boto/ec2/cloudwatch/alarm.py b/ext/boto/ec2/cloudwatch/alarm.py new file mode 100644 index 0000000000..c267ab0332 --- /dev/null +++ b/ext/boto/ec2/cloudwatch/alarm.py @@ -0,0 +1,323 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from datetime import datetime +from boto.ec2.cloudwatch.listelement import ListElement +from boto.ec2.cloudwatch.dimension import Dimension +from boto.compat import json +from boto.compat import six + + +class MetricAlarms(list): + def __init__(self, connection=None): + """ + Parses a list of MetricAlarms. + """ + list.__init__(self) + self.connection = connection + + def startElement(self, name, attrs, connection): + if name == 'member': + metric_alarm = MetricAlarm(connection) + self.append(metric_alarm) + return metric_alarm + + def endElement(self, name, value, connection): + pass + + +class MetricAlarm(object): + + OK = 'OK' + ALARM = 'ALARM' + INSUFFICIENT_DATA = 'INSUFFICIENT_DATA' + + _cmp_map = { + '>=': 'GreaterThanOrEqualToThreshold', + '>': 'GreaterThanThreshold', + '<': 'LessThanThreshold', + '<=': 'LessThanOrEqualToThreshold', + } + _rev_cmp_map = dict((v, k) for (k, v) in six.iteritems(_cmp_map)) + + def __init__(self, connection=None, name=None, metric=None, + namespace=None, statistic=None, comparison=None, + threshold=None, period=None, evaluation_periods=None, + unit=None, description='', dimensions=None, + alarm_actions=None, insufficient_data_actions=None, + ok_actions=None): + """ + Creates a new Alarm. + + :type name: str + :param name: Name of alarm. + + :type metric: str + :param metric: Name of alarm's associated metric. + + :type namespace: str + :param namespace: The namespace for the alarm's metric. + + :type statistic: str + :param statistic: The statistic to apply to the alarm's associated + metric. + Valid values: SampleCount|Average|Sum|Minimum|Maximum + + :type comparison: str + :param comparison: Comparison used to compare statistic with threshold. + Valid values: >= | > | < | <= + + :type threshold: float + :param threshold: The value against which the specified statistic + is compared. + + :type period: int + :param period: The period in seconds over which the specified + statistic is applied. 
+ + :type evaluation_periods: int + :param evaluation_periods: The number of periods over which data is + compared to the specified threshold. + + :type unit: str + :param unit: Allowed Values are: + Seconds|Microseconds|Milliseconds, + Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes, + Bits|Kilobits|Megabits|Gigabits|Terabits, + Percent|Count| + Bytes/Second|Kilobytes/Second|Megabytes/Second| + Gigabytes/Second|Terabytes/Second, + Bits/Second|Kilobits/Second|Megabits/Second, + Gigabits/Second|Terabits/Second|Count/Second|None + + :type description: str + :param description: Description of MetricAlarm + + :type dimensions: dict + :param dimensions: A dictionary of dimension key/values where + the key is the dimension name and the value + is either a scalar value or an iterator + of values to be associated with that + dimension. + Example: { + 'InstanceId': ['i-0123456', 'i-0123457'], + 'LoadBalancerName': 'test-lb' + } + + :type alarm_actions: list of strs + :param alarm_actions: A list of the ARNs of the actions to take in + ALARM state + + :type insufficient_data_actions: list of strs + :param insufficient_data_actions: A list of the ARNs of the actions to + take in INSUFFICIENT_DATA state + + :type ok_actions: list of strs + :param ok_actions: A list of the ARNs of the actions to take in OK state + """ + self.name = name + self.connection = connection + self.metric = metric + self.namespace = namespace + self.statistic = statistic + if threshold is not None: + self.threshold = float(threshold) + else: + self.threshold = None + self.comparison = self._cmp_map.get(comparison) + if period is not None: + self.period = int(period) + else: + self.period = None + if evaluation_periods is not None: + self.evaluation_periods = int(evaluation_periods) + else: + self.evaluation_periods = None + self.actions_enabled = None + self.alarm_arn = None + self.last_updated = None + self.description = description + self.dimensions = dimensions + self.state_reason = None + self.state_value = None + self.unit = unit + self.alarm_actions = alarm_actions + self.insufficient_data_actions = insufficient_data_actions + self.ok_actions = ok_actions + + def __repr__(self): + return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric, + self.statistic, + self.comparison, + self.threshold) + + def startElement(self, name, attrs, connection): + if name == 'AlarmActions': + self.alarm_actions = ListElement() + return self.alarm_actions + elif name == 'InsufficientDataActions': + self.insufficient_data_actions = ListElement() + return self.insufficient_data_actions + elif name == 'OKActions': + self.ok_actions = ListElement() + return self.ok_actions + elif name == 'Dimensions': + self.dimensions = Dimension() + return self.dimensions + else: + pass + + def endElement(self, name, value, connection): + if name == 'ActionsEnabled': + self.actions_enabled = value + elif name == 'AlarmArn': + self.alarm_arn = value + elif name == 'AlarmConfigurationUpdatedTimestamp': + self.last_updated = value + elif name == 'AlarmDescription': + self.description = value + elif name == 'AlarmName': + self.name = value + elif name == 'ComparisonOperator': + setattr(self, 'comparison', self._rev_cmp_map[value]) + elif name == 'EvaluationPeriods': + self.evaluation_periods = int(value) + elif name == 'MetricName': + self.metric = value + elif name == 'Namespace': + self.namespace = value + elif name == 'Period': + self.period = int(value) + elif name == 'StateReason': + self.state_reason = value + elif name == 'StateValue': + 
self.state_value = value
+        elif name == 'Statistic':
+            self.statistic = value
+        elif name == 'Threshold':
+            self.threshold = float(value)
+        elif name == 'Unit':
+            self.unit = value
+        else:
+            setattr(self, name, value)
+
+    def set_state(self, value, reason, data=None):
+        """ Temporarily sets the state of an alarm.
+
+        :type value: str
+        :param value: OK | ALARM | INSUFFICIENT_DATA
+
+        :type reason: str
+        :param reason: Reason alarm set (human readable).
+
+        :type data: str
+        :param data: Reason data (will be jsonified).
+        """
+        return self.connection.set_alarm_state(self.name, reason, value, data)
+
+    def update(self):
+        return self.connection.update_alarm(self)
+
+    def enable_actions(self):
+        return self.connection.enable_alarm_actions([self.name])
+
+    def disable_actions(self):
+        return self.connection.disable_alarm_actions([self.name])
+
+    def describe_history(self, start_date=None, end_date=None, max_records=None,
+                         history_item_type=None, next_token=None):
+        return self.connection.describe_alarm_history(self.name, start_date,
+                                                      end_date, max_records,
+                                                      history_item_type,
+                                                      next_token)
+
+    def add_alarm_action(self, action_arn=None):
+        """
+        Adds an alarm action, represented as an SNS topic, to this alarm.
+        What to do when the alarm is triggered.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state ALARM.
+        """
+        if not action_arn:
+            return  # Raise exception instead?
+        self.actions_enabled = 'true'
+        self.alarm_actions.append(action_arn)
+
+    def add_insufficient_data_action(self, action_arn=None):
+        """
+        Adds an insufficient_data action, represented as an SNS topic, to
+        this alarm. What to do when the insufficient_data state is reached.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state INSUFFICIENT_DATA.
+        """
+        if not action_arn:
+            return
+        self.actions_enabled = 'true'
+        self.insufficient_data_actions.append(action_arn)
+
+    def add_ok_action(self, action_arn=None):
+        """
+        Adds an ok action, represented as an SNS topic, to this alarm. What
+        to do when the ok state is reached.
+
+        :type action_arn: str
+        :param action_arn: SNS topics to which notification should be
+            sent if the alarm goes to state OK.
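+
+        A short sketch (the topic ARN is illustrative; the alarm is assumed
+        to already exist on the connection)::
+
+            alarm.add_ok_action(
+                'arn:aws:sns:us-east-1:123456789012:ops-topic')
+            alarm.update()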
+        """
+        if not action_arn:
+            return
+        self.actions_enabled = 'true'
+        self.ok_actions.append(action_arn)
+
+    def delete(self):
+        self.connection.delete_alarms([self.name])
+
+
+class AlarmHistoryItem(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+
+    def __repr__(self):
+        return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'AlarmName':
+            self.name = value
+        elif name == 'HistoryData':
+            self.data = json.loads(value)
+        elif name == 'HistoryItemType':
+            self.item_type = value
+        elif name == 'HistorySummary':
+            self.summary = value
+        elif name == 'Timestamp':
+            try:
+                self.timestamp = datetime.strptime(value,
+                                                   '%Y-%m-%dT%H:%M:%S.%fZ')
+            except ValueError:
+                self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
diff --git a/ext/boto/ec2/cloudwatch/datapoint.py b/ext/boto/ec2/cloudwatch/datapoint.py
new file mode 100644
index 0000000000..94955acd56
--- /dev/null
+++ b/ext/boto/ec2/cloudwatch/datapoint.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
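+#
+# A Datapoint is populated by boto's SAX parser; after a
+# GetMetricStatistics call each datapoint behaves like a plain dict,
+# e.g. (illustrative values):
+#
+#     {u'Average': 0.42, u'Unit': u'Percent',
+#      u'Timestamp': datetime.datetime(2017, 12, 27, 18, 0)}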
+# +from datetime import datetime + + +class Datapoint(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ['Average', 'Maximum', 'Minimum', 'Sum', 'SampleCount']: + self[name] = float(value) + elif name == 'Timestamp': + self[name] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ') + elif name != 'member': + self[name] = value diff --git a/ext/boto/ec2/cloudwatch/dimension.py b/ext/boto/ec2/cloudwatch/dimension.py new file mode 100644 index 0000000000..9ff4fb1160 --- /dev/null +++ b/ext/boto/ec2/cloudwatch/dimension.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class Dimension(dict): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Name': + self._name = value + elif name == 'Value': + if self._name in self: + self[self._name].append(value) + else: + self[self._name] = [value] + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/cloudwatch/listelement.py b/ext/boto/ec2/cloudwatch/listelement.py new file mode 100644 index 0000000000..6a2904181b --- /dev/null +++ b/ext/boto/ec2/cloudwatch/listelement.py @@ -0,0 +1,30 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
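+#
+# ListElement accumulates the text of each <member> element the SAX
+# parser reports, so a response fragment such as (illustrative)
+#
+#     <AlarmActions><member>arn:aws:sns:us-east-1:123456789012:t</member></AlarmActions>
+#
+# is parsed into a plain list with one entry per member.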
+ + +class ListElement(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/ext/boto/ec2/cloudwatch/metric.py b/ext/boto/ec2/cloudwatch/metric.py new file mode 100644 index 0000000000..15d1d968cc --- /dev/null +++ b/ext/boto/ec2/cloudwatch/metric.py @@ -0,0 +1,169 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.cloudwatch.alarm import MetricAlarm +from boto.ec2.cloudwatch.dimension import Dimension + + +class Metric(object): + + Statistics = ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'] + Units = ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', + 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', + 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', + 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', + 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', + 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', + 'Terabits/Second', 'Count/Second', None] + + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.namespace = None + self.dimensions = None + + def __repr__(self): + return 'Metric:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Dimensions': + self.dimensions = Dimension() + return self.dimensions + + def endElement(self, name, value, connection): + if name == 'MetricName': + self.name = value + elif name == 'Namespace': + self.namespace = value + else: + setattr(self, name, value) + + def query(self, start_time, end_time, statistics, unit=None, period=60): + """ + :type start_time: datetime + :param start_time: The time stamp to use for determining the + first datapoint to return. The value specified is + inclusive; results include datapoints with the time stamp + specified. + + :type end_time: datetime + :param end_time: The time stamp to use for determining the + last datapoint to return. The value specified is + exclusive; results will include datapoints up to the time + stamp specified. + + :type statistics: list + :param statistics: A list of statistics names Valid values: + Average | Sum | SampleCount | Maximum | Minimum + + :type unit: string + :param unit: The unit for the metric. 
Valid values are:
+            Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
+            Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
+            Megabits | Gigabits | Terabits | Percent | Count |
+            Bytes/Second | Kilobytes/Second | Megabytes/Second |
+            Gigabytes/Second | Terabytes/Second | Bits/Second |
+            Kilobits/Second | Megabits/Second | Gigabits/Second |
+            Terabits/Second | Count/Second | None
+
+        :type period: integer
+        :param period: The granularity, in seconds, of the returned datapoints.
+            Period must be at least 60 seconds and must be a multiple
+            of 60. The default value is 60.
+
+        """
+        if not isinstance(statistics, list):
+            statistics = [statistics]
+        return self.connection.get_metric_statistics(period,
+                                                     start_time,
+                                                     end_time,
+                                                     self.name,
+                                                     self.namespace,
+                                                     statistics,
+                                                     self.dimensions,
+                                                     unit)
+
+    def create_alarm(self, name, comparison, threshold,
+                     period, evaluation_periods,
+                     statistic, enabled=True, description=None,
+                     dimensions=None, alarm_actions=None, ok_actions=None,
+                     insufficient_data_actions=None, unit=None):
+        """
+        Creates or updates an alarm and associates it with this metric.
+        Optionally, this operation can associate one or more
+        Amazon Simple Notification Service resources with the alarm.
+
+        When this operation creates an alarm, the alarm state is immediately
+        set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
+        set appropriately. Any actions associated with the StateValue are then
+        executed.
+
+        When updating an existing alarm, its StateValue is left unchanged.
+
+        :rtype: :class:`boto.ec2.cloudwatch.alarm.MetricAlarm`
+        :return: The alarm that was created or updated.
+        """
+        if not dimensions:
+            dimensions = self.dimensions
+        alarm = MetricAlarm(self.connection, name, self.name,
+                            self.namespace, statistic, comparison,
+                            threshold, period, evaluation_periods,
+                            unit, description, dimensions,
+                            alarm_actions, insufficient_data_actions,
+                            ok_actions)
+        if self.connection.put_metric_alarm(alarm):
+            return alarm
+
+    def describe_alarms(self, period=None, statistic=None,
+                        dimensions=None, unit=None):
+        """
+        Retrieves all alarms for this metric. Specify a statistic, period,
+        or unit to filter the set of alarms further.
+
+        :type period: int
+        :param period: The period in seconds over which the statistic
+            is applied.
+
+        :type statistic: string
+        :param statistic: The statistic for the metric.
+
+        :type dimensions: dict
+        :param dimensions: A dictionary containing name/value
+            pairs that will be used to filter the results.  The key in
+            the dictionary is the name of a Dimension.  The value in
+            the dictionary is either a scalar value of that Dimension
+            name that you want to filter on, a list of values to
+            filter on or None if you want all metrics with that
+            Dimension name.
+
+        :type unit: string
+
+        :rtype: list
+        """
+        return self.connection.describe_alarms_for_metric(self.name,
+                                                          self.namespace,
+                                                          period,
+                                                          statistic,
+                                                          dimensions,
+                                                          unit)
diff --git a/ext/boto/ec2/connection.py b/ext/boto/ec2/connection.py
new file mode 100644
index 0000000000..f1ce62d852
--- /dev/null
+++ b/ext/boto/ec2/connection.py
@@ -0,0 +1,4527 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a connection to the EC2 service. +""" + +import base64 +import warnings +from datetime import datetime +from datetime import timedelta + +import boto +from boto.auth import detect_potential_sigv4 +from boto.connection import AWSQueryConnection +from boto.resultset import ResultSet +from boto.ec2.image import Image, ImageAttribute, CopyImage +from boto.ec2.instance import Reservation, Instance +from boto.ec2.instance import ConsoleOutput, InstanceAttribute +from boto.ec2.keypair import KeyPair +from boto.ec2.address import Address +from boto.ec2.volume import Volume, VolumeAttribute +from boto.ec2.snapshot import Snapshot +from boto.ec2.snapshot import SnapshotAttribute +from boto.ec2.zone import Zone +from boto.ec2.securitygroup import SecurityGroup +from boto.ec2.regioninfo import RegionInfo +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.reservedinstance import ReservedInstancesOffering +from boto.ec2.reservedinstance import ReservedInstance +from boto.ec2.reservedinstance import ReservedInstanceListing +from boto.ec2.reservedinstance import ReservedInstancesConfiguration +from boto.ec2.reservedinstance import ModifyReservedInstancesResult +from boto.ec2.reservedinstance import ReservedInstancesModification +from boto.ec2.spotinstancerequest import SpotInstanceRequest +from boto.ec2.spotpricehistory import SpotPriceHistory +from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription +from boto.ec2.bundleinstance import BundleInstanceTask +from boto.ec2.placementgroup import PlacementGroup +from boto.ec2.tag import Tag +from boto.ec2.instancetype import InstanceType +from boto.ec2.instancestatus import InstanceStatusSet +from boto.ec2.volumestatus import VolumeStatusSet +from boto.ec2.networkinterface import NetworkInterface +from boto.ec2.attributes import AccountAttribute, VPCAttribute +from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType +from boto.exception import EC2ResponseError +from boto.compat import six + +#boto.set_stream_logger('ec2') + + +class EC2Connection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'ec2_version', '2014-10-01') + DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', + 'ec2.us-east-1.amazonaws.com') + ResponseError = EC2ResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + 
is_secure=True, host=None, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + api_version=None, security_token=None, + validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2. + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(EC2Connection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + if api_version: + self.APIVersion = api_version + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_params(self): + """ + Returns a dictionary containing the value of all of the keyword + arguments passed when constructing this connection. + """ + param_names = ['aws_access_key_id', 'aws_secret_access_key', + 'is_secure', 'port', 'proxy', 'proxy_port', + 'proxy_user', 'proxy_pass', + 'debug', 'https_connection_factory'] + params = {} + for name in param_names: + params[name] = getattr(self, name) + return params + + def build_filter_params(self, params, filters): + if not isinstance(filters, dict): + filters = dict(filters) + + i = 1 + for name in filters: + aws_name = name + if not aws_name.startswith('tag:'): + aws_name = name.replace('_', '-') + params['Filter.%d.Name' % i] = aws_name + value = filters[name] + if not isinstance(value, list): + value = [value] + j = 1 + for v in value: + params['Filter.%d.Value.%d' % (i, j)] = v + j += 1 + i += 1 + + # Image methods + + def get_all_images(self, image_ids=None, owners=None, + executable_by=None, filters=None, dry_run=False): + """ + Retrieve all the EC2 images available on your account. + + :type image_ids: list + :param image_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs, the special strings 'self', + 'amazon', and 'aws-marketplace', may be used to describe + images owned by you, Amazon or AWS Marketplace + respectively + + :type executable_by: list + :param executable_by: Returns AMIs for which the specified + user ID has explicit launch permissions + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + params = {} + if image_ids: + self.build_list_params(params, image_ids, 'ImageId') + if owners: + self.build_list_params(params, owners, 'Owner') + if executable_by: + self.build_list_params(params, executable_by, 'ExecutableBy') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeImages', params, + [('item', Image)], verb='POST') + + def get_all_kernels(self, kernel_ids=None, owners=None, dry_run=False): + """ + Retrieve all the EC2 kernels available on your account. + Constructs a filter to allow the processing to happen server side. 
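+
+        A sketch of an equivalent filtered call (the owner list is
+        illustrative; ``conn`` is assumed to be an EC2Connection)::
+
+            kernels = conn.get_all_kernels(owners=['amazon'])
+            # equivalent to:
+            # conn.get_all_images(owners=['amazon'],
+            #                     filters={'image-type': 'kernel'})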
+
+        :type kernel_ids: list
+        :param kernel_ids: A list of strings with the image IDs wanted
+
+        :type owners: list
+        :param owners: A list of owner IDs
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.image.Image`
+        """
+        params = {}
+        if kernel_ids:
+            self.build_list_params(params, kernel_ids, 'ImageId')
+        if owners:
+            self.build_list_params(params, owners, 'Owner')
+        filter = {'image-type': 'kernel'}
+        self.build_filter_params(params, filter)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeImages', params,
+                             [('item', Image)], verb='POST')
+
+    def get_all_ramdisks(self, ramdisk_ids=None, owners=None, dry_run=False):
+        """
+        Retrieve all the EC2 ramdisks available on your account.
+        Constructs a filter to allow the processing to happen server side.
+
+        :type ramdisk_ids: list
+        :param ramdisk_ids: A list of strings with the image IDs wanted
+
+        :type owners: list
+        :param owners: A list of owner IDs
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.image.Image`
+        """
+        params = {}
+        if ramdisk_ids:
+            self.build_list_params(params, ramdisk_ids, 'ImageId')
+        if owners:
+            self.build_list_params(params, owners, 'Owner')
+        filter = {'image-type': 'ramdisk'}
+        self.build_filter_params(params, filter)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeImages', params,
+                             [('item', Image)], verb='POST')
+
+    def get_image(self, image_id, dry_run=False):
+        """
+        Shortcut method to retrieve a specific image (AMI).
+
+        :type image_id: string
+        :param image_id: the ID of the Image to retrieve
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.image.Image`
+        :return: The EC2 Image specified or None if the image is not found
+        """
+        try:
+            return self.get_all_images(image_ids=[image_id], dry_run=dry_run)[0]
+        except IndexError:  # None of those images available
+            return None
+
+    def register_image(self, name=None, description=None, image_location=None,
+                       architecture=None, kernel_id=None, ramdisk_id=None,
+                       root_device_name=None, block_device_map=None,
+                       dry_run=False, virtualization_type=None,
+                       sriov_net_support=None,
+                       snapshot_id=None,
+                       delete_root_volume_on_termination=False):
+        """
+        Register an image.
+
+        :type name: string
+        :param name: The name of the AMI.  Valid only for EBS-based images.
+
+        :type description: string
+        :param description: The description of the AMI.
+
+        :type image_location: string
+        :param image_location: Full path to your AMI manifest in
+            Amazon S3 storage.  Only used for S3-based AMIs.
+
+        :type architecture: string
+        :param architecture: The architecture of the AMI.  Valid choices are:
+            * i386
+            * x86_64
+
+        :type kernel_id: string
+        :param kernel_id: The ID of the kernel with which to launch
+            the instances
+
+        :type root_device_name: string
+        :param root_device_name: The root device name (e.g. /dev/sdh)
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+            describing the EBS volumes associated with the Image.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type virtualization_type: string
+        :param virtualization_type: The virtualization type of the image.
+ Valid choices are: + * paravirtual + * hvm + + :type sriov_net_support: string + :param sriov_net_support: Advanced networking support. + Valid choices are: + * simple + + :type snapshot_id: string + :param snapshot_id: A snapshot ID for the snapshot to be used + as root device for the image. Mutually exclusive with + block_device_map, requires root_device_name + + :type delete_root_volume_on_termination: bool + :param delete_root_volume_on_termination: Whether to delete the root + volume of the image after instance termination. Only applies when + creating image from snapshot_id. Defaults to False. Note that + leaving volumes behind after instance termination is not free. + + :rtype: string + :return: The new image id + """ + params = {} + if name: + params['Name'] = name + if description: + params['Description'] = description + if architecture: + params['Architecture'] = architecture + if kernel_id: + params['KernelId'] = kernel_id + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if image_location: + params['ImageLocation'] = image_location + if root_device_name: + params['RootDeviceName'] = root_device_name + if snapshot_id: + root_vol = BlockDeviceType(snapshot_id=snapshot_id, + delete_on_termination=delete_root_volume_on_termination) + block_device_map = BlockDeviceMapping() + block_device_map[root_device_name] = root_vol + if block_device_map: + block_device_map.ec2_build_list_params(params) + if dry_run: + params['DryRun'] = 'true' + if virtualization_type: + params['VirtualizationType'] = virtualization_type + if sriov_net_support: + params['SriovNetSupport'] = sriov_net_support + + rs = self.get_object('RegisterImage', params, ResultSet, verb='POST') + image_id = getattr(rs, 'imageId', None) + return image_id + + def deregister_image(self, image_id, delete_snapshot=False, dry_run=False): + """ + Unregister an AMI. + + :type image_id: string + :param image_id: the ID of the Image to unregister + + :type delete_snapshot: bool + :param delete_snapshot: Set to True if we should delete the + snapshot associated with an EBS volume mounted at /dev/sda1 + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + snapshot_id = None + if delete_snapshot: + image = self.get_image(image_id) + for key in image.block_device_mapping: + if key == "/dev/sda1": + snapshot_id = image.block_device_mapping[key].snapshot_id + break + params = { + 'ImageId': image_id, + } + if dry_run: + params['DryRun'] = 'true' + result = self.get_status('DeregisterImage', + params, verb='POST') + if result and snapshot_id: + return result and self.delete_snapshot(snapshot_id) + return result + + def create_image(self, instance_id, name, + description=None, no_reboot=False, + block_device_mapping=None, dry_run=False): + """ + Will create an AMI from the instance in the running or stopped + state. + + :type instance_id: string + :param instance_id: the ID of the instance to image. + + :type name: string + :param name: The name of the new image + + :type description: string + :param description: An optional human-readable string describing + the contents and purpose of the AMI. + + :type no_reboot: bool + :param no_reboot: An optional flag indicating that the + bundling process should not attempt to shutdown the + instance before bundling. If this flag is True, the + responsibility of maintaining file system integrity is + left to the owner of the instance. 
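+
+        Example (an illustrative sketch; ``conn`` and the instance ID
+        are placeholders, not values from the original docs)::
+
+            ami_id = conn.create_image('i-12345678', 'my-backup-ami',
+                                       no_reboot=True)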
+ + :type block_device_mapping: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_mapping: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: string + :return: The new image id + """ + params = {'InstanceId': instance_id, + 'Name': name} + if description: + params['Description'] = description + if no_reboot: + params['NoReboot'] = 'true' + if block_device_mapping: + block_device_mapping.ec2_build_list_params(params) + if dry_run: + params['DryRun'] = 'true' + img = self.get_object('CreateImage', params, Image, verb='POST') + return img.id + + # ImageAttribute methods + + def get_image_attribute(self, image_id, attribute='launchPermission', + dry_run=False): + """ + Gets an attribute from an image. + + :type image_id: string + :param image_id: The Amazon image id for which you want info about + + :type attribute: string + :param attribute: The attribute you need information about. + Valid choices are: + * launchPermission + * productCodes + * blockDeviceMapping + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.image.ImageAttribute` + :return: An ImageAttribute object representing the value of the + attribute requested + """ + params = {'ImageId': image_id, + 'Attribute': attribute} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeImageAttribute', params, + ImageAttribute, verb='POST') + + def modify_image_attribute(self, image_id, attribute='launchPermission', + operation='add', user_ids=None, groups=None, + product_codes=None, dry_run=False): + """ + Changes an attribute of an image. + + :type image_id: string + :param image_id: The image id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change + + :type operation: string + :param operation: Either add or remove (this is required for changing + launchPermissions) + + :type user_ids: list + :param user_ids: The Amazon IDs of users to add/remove attributes + + :type groups: list + :param groups: The groups to add/remove attributes + + :type product_codes: list + :param product_codes: Amazon DevPay product code. Currently only one + product code can be associated with an AMI. Once + set, the product code cannot be changed or reset. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'ImageId': image_id, + 'Attribute': attribute, + 'OperationType': operation} + if user_ids: + self.build_list_params(params, user_ids, 'UserId') + if groups: + self.build_list_params(params, groups, 'UserGroup') + if product_codes: + self.build_list_params(params, product_codes, 'ProductCode') + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyImageAttribute', params, verb='POST') + + def reset_image_attribute(self, image_id, attribute='launchPermission', + dry_run=False): + """ + Resets an attribute of an AMI to its default value. + + :type image_id: string + :param image_id: ID of the AMI for which an attribute will be described + + :type attribute: string + :param attribute: The attribute to reset + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
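+
+        Example (illustrative only; the connection object and AMI ID
+        are assumed placeholders)::
+
+            conn.reset_image_attribute('ami-12345678',
+                                       attribute='launchPermission')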
+ + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'ImageId': image_id, + 'Attribute': attribute} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ResetImageAttribute', params, verb='POST') + + # Instance methods + + def get_all_instances(self, instance_ids=None, filters=None, dry_run=False, + max_results=None): + """ + Retrieve all the instance reservations associated with your account. + + .. note:: + This method's current behavior is deprecated in favor of + :meth:`get_all_reservations`. A future major release will change + :meth:`get_all_instances` to return a list of + :class:`boto.ec2.instance.Instance` objects as its name suggests. + To obtain that behavior today, use :meth:`get_only_instances`. + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Reservation` + + """ + warnings.warn(('The current get_all_instances implementation will be ' + 'replaced with get_all_reservations.'), + PendingDeprecationWarning) + return self.get_all_reservations(instance_ids=instance_ids, + filters=filters, dry_run=dry_run, + max_results=max_results) + + def get_only_instances(self, instance_ids=None, filters=None, + dry_run=False, max_results=None): + # A future release should rename this method to get_all_instances + # and make get_only_instances an alias for that. + """ + Retrieve all the instances associated with your account. + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Instance` + """ + next_token = None + retval = [] + while True: + reservations = self.get_all_reservations(instance_ids=instance_ids, + filters=filters, + dry_run=dry_run, + max_results=max_results, + next_token=next_token) + retval.extend([instance for reservation in reservations for + instance in reservation.instances]) + next_token = reservations.next_token + if not next_token: + break + + return retval + + def get_all_reservations(self, instance_ids=None, filters=None, + dry_run=False, max_results=None, next_token=None): + """ + Retrieve all the instance reservations associated with your account. 
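+
+        Example (an illustrative sketch; ``conn`` is an assumed
+        ``EC2Connection``)::
+
+            reservations = conn.get_all_reservations(
+                filters={'instance-state-name': 'running'})
+            for reservation in reservations:
+                for instance in reservation.instances:
+                    print(instance.id, instance.state)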
+ + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Reservation` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if filters: + if 'group-id' in filters: + gid = filters.get('group-id') + if not gid.startswith('sg-') or len(gid) != 11: + warnings.warn( + "The group-id filter now requires a security group " + "identifier (sg-*) instead of a group name. To filter " + "by group name use the 'group-name' filter instead.", + UserWarning) + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeInstances', params, + [('item', Reservation)], verb='POST') + + def get_all_instance_status(self, instance_ids=None, + max_results=None, next_token=None, + filters=None, dry_run=False, + include_all_instances=False): + """ + Retrieve all the instances in your account scheduled for maintenance. + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type include_all_instances: bool + :param include_all_instances: Set to True if all + instances should be returned. (Only running + instances are included by default.) + + :rtype: list + :return: A list of instances that have maintenance scheduled. 
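+
+        Example (illustrative only; ``conn`` is an assumed connection)::
+
+            statuses = conn.get_all_instance_status(
+                include_all_instances=True)
+            for status in statuses:
+                print(status.id, status.state_name)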
+ """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if max_results: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if include_all_instances: + params['IncludeAllInstances'] = 'true' + return self.get_object('DescribeInstanceStatus', params, + InstanceStatusSet, verb='POST') + + def run_instances(self, image_id, min_count=1, max_count=1, + key_name=None, security_groups=None, + user_data=None, addressing_type=None, + instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None, + disable_api_termination=False, + instance_initiated_shutdown_behavior=None, + private_ip_address=None, + placement_group=None, client_token=None, + security_group_ids=None, + additional_info=None, instance_profile_name=None, + instance_profile_arn=None, tenancy=None, + ebs_optimized=False, network_interfaces=None, + dry_run=False): + """ + Runs an image on EC2. + + :type image_id: string + :param image_id: The ID of the image to run. + + :type min_count: int + :param min_count: The minimum number of instances to launch. + + :type max_count: int + :param max_count: The maximum number of instances to launch. + + :type key_name: string + :param key_name: The name of the key pair with which to + launch instances. + + :type security_groups: list of strings + :param security_groups: The names of the EC2 classic security groups + with which to associate instances + + :type user_data: string + :param user_data: The user data passed to the launched instances + + :type instance_type: string + :param instance_type: The type of instance to run: + + * t1.micro + * m1.small + * m1.medium + * m1.large + * m1.xlarge + * m3.medium + * m3.large + * m3.xlarge + * m3.2xlarge + * c1.medium + * c1.xlarge + * m2.xlarge + * m2.2xlarge + * m2.4xlarge + * cr1.8xlarge + * hi1.4xlarge + * hs1.8xlarge + * cc1.4xlarge + * cg1.4xlarge + * cc2.8xlarge + * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge + * c4.large + * c4.xlarge + * c4.2xlarge + * c4.4xlarge + * c4.8xlarge + * i2.xlarge + * i2.2xlarge + * i2.4xlarge + * i2.8xlarge + * t2.micro + * t2.small + * t2.medium + + :type placement: string + :param placement: The Availability Zone to launch the instance into. + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the + instances. + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the + instances. + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable detailed CloudWatch monitoring on + the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances + for VPC. + + :type private_ip_address: string + :param private_ip_address: If you're using VPC, you can + optionally use this parameter to assign the instance a + specific available IP address from the subnet (e.g., + 10.0.0.25). + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type disable_api_termination: bool + :param disable_api_termination: If True, the instances will be locked + and will not be able to be terminated via the API. 
+
+        :type instance_initiated_shutdown_behavior: string
+        :param instance_initiated_shutdown_behavior: Specifies whether the
+            instance stops or terminates on instance-initiated shutdown.
+            Valid values are:
+
+            * stop
+            * terminate
+
+        :type placement_group: string
+        :param placement_group: If specified, this is the name of the placement
+            group in which the instance(s) will be launched.
+
+        :type client_token: string
+        :param client_token: Unique, case-sensitive identifier you provide
+            to ensure idempotency of the request. Maximum 64 ASCII characters.
+
+        :type security_group_ids: list of strings
+        :param security_group_ids: The ID of the VPC security groups with
+            which to associate instances.
+
+        :type additional_info: string
+        :param additional_info: Specifies additional information to make
+            available to the instance(s).
+
+        :type tenancy: string
+        :param tenancy: The tenancy of the instance you want to
+            launch. An instance with a tenancy of 'dedicated' runs on
+            single-tenant hardware and can only be launched into a
+            VPC. Valid values are: "default" or "dedicated".
+            NOTE: To use dedicated tenancy you MUST specify a VPC
+            subnet-ID as well.
+
+        :type instance_profile_arn: string
+        :param instance_profile_arn: The Amazon resource name (ARN) of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: The name of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type ebs_optimized: bool
+        :param ebs_optimized: Whether the instance is optimized for
+            EBS I/O. This optimization provides dedicated throughput
+            to Amazon EBS and an optimized configuration stack to
+            provide optimal EBS I/O performance. This optimization
+            isn't available with all instance types.
+
+        :type network_interfaces: :class:`boto.ec2.networkinterface.NetworkInterfaceCollection`
+        :param network_interfaces: A NetworkInterfaceCollection data
+            structure containing the ENI specifications for the instance.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
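+
+        Example (an illustrative sketch; every ID and name below is a
+        placeholder, not a value from the original docs)::
+
+            reservation = conn.run_instances('ami-12345678',
+                                             key_name='my-key',
+                                             instance_type='m1.small',
+                                             security_groups=['default'])
+            instance = reservation.instances[0]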
+ + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with + the request for machines + """ + params = {'ImageId': image_id, + 'MinCount': min_count, + 'MaxCount': max_count} + if key_name: + params['KeyName'] = key_name + if security_group_ids: + l = [] + for group in security_group_ids: + if isinstance(group, SecurityGroup): + l.append(group.id) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroupId') + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, SecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroup') + if user_data: + if isinstance(user_data, six.text_type): + user_data = user_data.encode('utf-8') + params['UserData'] = base64.b64encode(user_data).decode('utf-8') + if addressing_type: + params['AddressingType'] = addressing_type + if instance_type: + params['InstanceType'] = instance_type + if placement: + params['Placement.AvailabilityZone'] = placement + if placement_group: + params['Placement.GroupName'] = placement_group + if tenancy: + params['Placement.Tenancy'] = tenancy + if kernel_id: + params['KernelId'] = kernel_id + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if monitoring_enabled: + params['Monitoring.Enabled'] = 'true' + if subnet_id: + params['SubnetId'] = subnet_id + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + if block_device_map: + block_device_map.ec2_build_list_params(params) + if disable_api_termination: + params['DisableApiTermination'] = 'true' + if instance_initiated_shutdown_behavior: + val = instance_initiated_shutdown_behavior + params['InstanceInitiatedShutdownBehavior'] = val + if client_token: + params['ClientToken'] = client_token + if additional_info: + params['AdditionalInfo'] = additional_info + if instance_profile_name: + params['IamInstanceProfile.Name'] = instance_profile_name + if instance_profile_arn: + params['IamInstanceProfile.Arn'] = instance_profile_arn + if ebs_optimized: + params['EbsOptimized'] = 'true' + if network_interfaces: + network_interfaces.build_list_params(params) + if dry_run: + params['DryRun'] = 'true' + return self.get_object('RunInstances', params, Reservation, + verb='POST') + + def terminate_instances(self, instance_ids=None, dry_run=False): + """ + Terminate the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the Instance IDs to terminate + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of the instances terminated + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('TerminateInstances', params, + [('item', Instance)], verb='POST') + + def stop_instances(self, instance_ids=None, force=False, dry_run=False): + """ + Stop the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the Instance IDs to stop + + :type force: bool + :param force: Forces the instance to stop + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
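+
+        Example (illustrative; the instance ID is a placeholder)::
+
+            stopped = conn.stop_instances(instance_ids=['i-12345678'],
+                                          force=False)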
+
+        :rtype: list
+        :return: A list of the instances stopped
+        """
+        params = {}
+        if force:
+            params['Force'] = 'true'
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('StopInstances', params,
+                             [('item', Instance)], verb='POST')
+
+    def start_instances(self, instance_ids=None, dry_run=False):
+        """
+        Start the instances specified
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of the Instance IDs to start
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the instances started
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('StartInstances', params,
+                             [('item', Instance)], verb='POST')
+
+    def get_console_output(self, instance_id, dry_run=False):
+        """
+        Retrieves the console output for the specified instance.
+
+        :type instance_id: string
+        :param instance_id: The instance ID of a running instance on the cloud.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.instance.ConsoleOutput`
+        :return: The console output as a ConsoleOutput object
+        """
+        params = {}
+        self.build_list_params(params, [instance_id], 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('GetConsoleOutput', params,
+                               ConsoleOutput, verb='POST')
+
+    def reboot_instances(self, instance_ids=None, dry_run=False):
+        """
+        Reboot the specified instances.
+
+        :type instance_ids: list
+        :param instance_ids: The instances to reboot
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RebootInstances', params)
+
+    def confirm_product_instance(self, product_code, instance_id,
+                                 dry_run=False):
+        """
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'ProductCode': product_code,
+                  'InstanceId': instance_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        rs = self.get_object('ConfirmProductInstance', params,
+                             ResultSet, verb='POST')
+        return (rs.status, rs.ownerId)
+
+    # InstanceAttribute methods
+
+    def get_instance_attribute(self, instance_id, attribute, dry_run=False):
+        """
+        Gets an attribute from an instance.
+
+        :type instance_id: string
+        :param instance_id: The Amazon id of the instance
+
+        :type attribute: string
+        :param attribute: The attribute you need information about.
+            Valid choices are:
+
+            * instanceType
+            * kernel
+            * ramdisk
+            * userData
+            * disableApiTermination
+            * instanceInitiatedShutdownBehavior
+            * rootDeviceName
+            * blockDeviceMapping
+            * productCodes
+            * sourceDestCheck
+            * groupSet
+            * ebsOptimized
+            * sriovNetSupport
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
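+
+        Example (an illustrative sketch; the instance ID is a
+        placeholder and the returned object is dict-like)::
+
+            attr = conn.get_instance_attribute('i-12345678',
+                                               'instanceType')
+            print(attr.get('instanceType'))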
+ + :rtype: :class:`boto.ec2.image.InstanceAttribute` + :return: An InstanceAttribute object representing the value of the + attribute requested + """ + params = {'InstanceId': instance_id} + if attribute: + params['Attribute'] = attribute + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeInstanceAttribute', params, + InstanceAttribute, verb='POST') + + def modify_network_interface_attribute(self, interface_id, attr, value, + attachment_id=None, dry_run=False): + """ + Changes an attribute of a network interface. + + :type interface_id: string + :param interface_id: The interface id. Looks like 'eni-xxxxxxxx' + + :type attr: string + :param attr: The attribute you wish to change. + + Learn more at http://docs.aws.amazon.com/AWSEC2/latest/API\ + Reference/ApiReference-query-ModifyNetworkInterfaceAttribute.html + + * description - Textual description of interface + * groupSet - List of security group ids or group objects + * sourceDestCheck - Boolean + * deleteOnTermination - Boolean. Must also specify attachment_id + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + + :type attachment_id: string + :param attachment_id: If you're modifying DeleteOnTermination you must + specify the attachment_id. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + bool_reqs = ( + 'deleteontermination', + 'sourcedestcheck', + ) + if attr.lower() in bool_reqs: + if isinstance(value, bool): + if value: + value = 'true' + else: + value = 'false' + elif value not in ['true', 'false']: + raise ValueError('%s must be a boolean, "true", or "false"!' + % attr) + + params = {'NetworkInterfaceId': interface_id} + + # groupSet is handled differently from other arguments + if attr.lower() == 'groupset': + for idx, sg in enumerate(value): + if isinstance(sg, SecurityGroup): + sg = sg.id + params['SecurityGroupId.%s' % (idx + 1)] = sg + elif attr.lower() == 'description': + params['Description.Value'] = value + elif attr.lower() == 'sourcedestcheck': + params['SourceDestCheck.Value'] = value + elif attr.lower() == 'deleteontermination': + params['Attachment.DeleteOnTermination'] = value + if not attachment_id: + raise ValueError('You must also specify an attachment_id') + params['Attachment.AttachmentId'] = attachment_id + else: + raise ValueError('Unknown attribute "%s"' % (attr,)) + + if dry_run: + params['DryRun'] = 'true' + return self.get_status( + 'ModifyNetworkInterfaceAttribute', params, verb='POST') + + def modify_instance_attribute(self, instance_id, attribute, value, + dry_run=False): + """ + Changes an attribute of an instance + + :type instance_id: string + :param instance_id: The instance id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change. + + * instanceType - A valid instance type (m1.small) + * kernel - Kernel ID (None) + * ramdisk - Ramdisk ID (None) + * userData - Base64 encoded String (None) + * disableApiTermination - Boolean (true) + * instanceInitiatedShutdownBehavior - stop|terminate + * blockDeviceMapping - List of strings - ie: ['/dev/sda=false'] + * sourceDestCheck - Boolean (true) + * groupSet - Set of Security Groups or IDs + * ebsOptimized - Boolean (false) + * sriovNetSupport - String - ie: 'simple' + + :type value: string + :param value: The new value for the attribute + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
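+
+        Example (illustrative; the instance ID is a placeholder)::
+
+            conn.modify_instance_attribute('i-12345678', 'instanceType',
+                                           'm1.large')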
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        # Allow a bool to be passed in for value of disableApiTermination
+        bool_reqs = ('disableapitermination',
+                     'sourcedestcheck',
+                     'ebsoptimized')
+        if attribute.lower() in bool_reqs:
+            if isinstance(value, bool):
+                if value:
+                    value = 'true'
+                else:
+                    value = 'false'
+
+        params = {'InstanceId': instance_id}
+
+        # groupSet is handled differently from other arguments
+        if attribute.lower() == 'groupset':
+            for idx, sg in enumerate(value):
+                if isinstance(sg, SecurityGroup):
+                    sg = sg.id
+                params['GroupId.%s' % (idx + 1)] = sg
+        elif attribute.lower() == 'blockdevicemapping':
+            for idx, kv in enumerate(value):
+                dev_name, _, flag = kv.partition('=')
+                pre = 'BlockDeviceMapping.%d' % (idx + 1)
+                params['%s.DeviceName' % pre] = dev_name
+                params['%s.Ebs.DeleteOnTermination' % pre] = flag or 'true'
+        else:
+            # for backwards compatibility handle lowercase first letter
+            attribute = attribute[0].upper() + attribute[1:]
+            params['%s.Value' % attribute] = value
+
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifyInstanceAttribute', params, verb='POST')
+
+    def reset_instance_attribute(self, instance_id, attribute, dry_run=False):
+        """
+        Resets an attribute of an instance to its default value.
+
+        :type instance_id: string
+        :param instance_id: ID of the instance
+
+        :type attribute: string
+        :param attribute: The attribute to reset. Valid values are:
+            kernel|ramdisk
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'InstanceId': instance_id,
+                  'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ResetInstanceAttribute', params, verb='POST')
+
+    # Spot Instances
+
+    def get_all_spot_instance_requests(self, request_ids=None,
+                                       filters=None, dry_run=False):
+        """
+        Retrieve all the spot instance requests associated with your account.
+
+        :type request_ids: list
+        :param request_ids: A list of strings of spot instance request IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+        """
+        params = {}
+        if request_ids:
+            self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+        if filters:
+            if 'launch.group-id' in filters:
+                lgid = filters.get('launch.group-id')
+                if not lgid.startswith('sg-') or len(lgid) != 11:
+                    warnings.warn(
+                        "The 'launch.group-id' filter now requires a security "
+                        "group id (sg-*) and no longer supports filtering by "
+                        "group name. Please update your filters accordingly.",
+                        UserWarning)
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSpotInstanceRequests', params,
+                             [('item', SpotInstanceRequest)], verb='POST')
+
+    def get_spot_price_history(self, start_time=None, end_time=None,
+                               instance_type=None, product_description=None,
+                               availability_zone=None, dry_run=False,
+                               max_results=None, next_token=None,
+                               filters=None):
+        """
+        Retrieve the recent history of spot instances pricing.
+
+        :type start_time: str
+        :param start_time: An indication of how far back to provide price
+            changes for. An ISO8601 DateTime string.
+
+        :type end_time: str
+        :param end_time: An indication of how far forward to provide price
+            changes for. An ISO8601 DateTime string.
+
+        :type instance_type: str
+        :param instance_type: Filter responses to a particular instance type.
+
+        :type product_description: str
+        :param product_description: Filter responses to a particular platform.
+            Valid values are currently:
+
+            * Linux/UNIX
+            * SUSE Linux
+            * Windows
+            * Linux/UNIX (Amazon VPC)
+            * SUSE Linux (Amazon VPC)
+            * Windows (Amazon VPC)
+
+        :type availability_zone: str
+        :param availability_zone: The availability zone for which prices
+            should be returned. If not specified, data for all
+            availability zones will be returned.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type max_results: int
+        :param max_results: The maximum number of paginated items
+            per response.
+
+        :type next_token: str
+        :param next_token: The next set of rows to return. This should
+            be the value of the ``next_token`` attribute from a previous
+            call to ``get_spot_price_history``.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned. Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value. The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :rtype: list
+        :return: A list of tuples containing price and timestamp.
+        """
+        params = {}
+        if start_time:
+            params['StartTime'] = start_time
+        if end_time:
+            params['EndTime'] = end_time
+        if instance_type:
+            params['InstanceType'] = instance_type
+        if product_description:
+            params['ProductDescription'] = product_description
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if dry_run:
+            params['DryRun'] = 'true'
+        if max_results is not None:
+            params['MaxResults'] = max_results
+        if next_token:
+            params['NextToken'] = next_token
+        if filters:
+            self.build_filter_params(params, filters)
+        return self.get_list('DescribeSpotPriceHistory', params,
+                             [('item', SpotPriceHistory)], verb='POST')
+
+    def request_spot_instances(self, price, image_id, count=1, type='one-time',
+                               valid_from=None, valid_until=None,
+                               launch_group=None, availability_zone_group=None,
+                               key_name=None, security_groups=None,
+                               user_data=None, addressing_type=None,
+                               instance_type='m1.small', placement=None,
+                               kernel_id=None, ramdisk_id=None,
+                               monitoring_enabled=False, subnet_id=None,
+                               placement_group=None,
+                               block_device_map=None,
+                               instance_profile_arn=None,
+                               instance_profile_name=None,
+                               security_group_ids=None,
+                               ebs_optimized=False,
+                               network_interfaces=None, dry_run=False):
+        """
+        Request instances on the spot market at a particular price.
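+
+        Example (an illustrative sketch; the price, AMI ID and key name
+        are placeholders)::
+
+            requests = conn.request_spot_instances(price='0.05',
+                                                   image_id='ami-12345678',
+                                                   count=1,
+                                                   key_name='my-key',
+                                                   instance_type='m1.small')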
+
+        :type price: str
+        :param price: The maximum price of your bid
+
+        :type image_id: string
+        :param image_id: The ID of the image to run
+
+        :type count: int
+        :param count: The number of instances requested
+
+        :type type: str
+        :param type: Type of request. Can be 'one-time' or 'persistent'.
+            Default is one-time.
+
+        :type valid_from: str
+        :param valid_from: Start date of the request. An ISO8601 time string.
+
+        :type valid_until: str
+        :param valid_until: End date of the request. An ISO8601 time string.
+
+        :type launch_group: str
+        :param launch_group: If supplied, all requests will be fulfilled
+            as a group.
+
+        :type availability_zone_group: str
+        :param availability_zone_group: If supplied, all requests will be
+            fulfilled within a single availability zone.
+
+        :type key_name: string
+        :param key_name: The name of the key pair with which to
+            launch instances
+
+        :type security_groups: list of strings
+        :param security_groups: The names of the security groups with which to
+            associate instances
+
+        :type user_data: string
+        :param user_data: The user data passed to the launched instances
+
+        :type instance_type: string
+        :param instance_type: The type of instance to run:
+
+            * t1.micro
+            * m1.small
+            * m1.medium
+            * m1.large
+            * m1.xlarge
+            * m3.medium
+            * m3.large
+            * m3.xlarge
+            * m3.2xlarge
+            * c1.medium
+            * c1.xlarge
+            * m2.xlarge
+            * m2.2xlarge
+            * m2.4xlarge
+            * cr1.8xlarge
+            * hi1.4xlarge
+            * hs1.8xlarge
+            * cc1.4xlarge
+            * cg1.4xlarge
+            * cc2.8xlarge
+            * g2.2xlarge
+            * c3.large
+            * c3.xlarge
+            * c3.2xlarge
+            * c3.4xlarge
+            * c3.8xlarge
+            * c4.large
+            * c4.xlarge
+            * c4.2xlarge
+            * c4.4xlarge
+            * c4.8xlarge
+            * i2.xlarge
+            * i2.2xlarge
+            * i2.4xlarge
+            * i2.8xlarge
+            * t2.micro
+            * t2.small
+            * t2.medium
+
+        :type placement: string
+        :param placement: The availability zone in which to launch
+            the instances
+
+        :type kernel_id: string
+        :param kernel_id: The ID of the kernel with which to launch the
+            instances
+
+        :type ramdisk_id: string
+        :param ramdisk_id: The ID of the RAM disk with which to launch the
+            instances
+
+        :type monitoring_enabled: bool
+        :param monitoring_enabled: Enable detailed CloudWatch monitoring on
+            the instance.
+
+        :type subnet_id: string
+        :param subnet_id: The subnet ID within which to launch the instances
+            for VPC.
+
+        :type placement_group: string
+        :param placement_group: If specified, this is the name of the placement
+            group in which the instance(s) will be launched.
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+            describing the EBS volumes associated with the Image.
+
+        :type security_group_ids: list of strings
+        :param security_group_ids: The ID of the VPC security groups with
+            which to associate instances.
+
+        :type instance_profile_arn: string
+        :param instance_profile_arn: The Amazon resource name (ARN) of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: The name of
+            the IAM Instance Profile (IIP) to associate with the instances.
+
+        :type ebs_optimized: bool
+        :param ebs_optimized: Whether the instance is optimized for
+            EBS I/O. This optimization provides dedicated throughput
+            to Amazon EBS and an optimized configuration stack to
+            provide optimal EBS I/O performance. This optimization
+            isn't available with all instance types.
+
+        :type network_interfaces: list
+        :param network_interfaces: A list of
+            :class:`boto.ec2.networkinterface.NetworkInterfaceSpecification`
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the
+            :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+            objects associated with the request for machines
+        """
+        ls = 'LaunchSpecification'
+        params = {'%s.ImageId' % ls: image_id,
+                  'Type': type,
+                  'SpotPrice': price}
+        if count:
+            params['InstanceCount'] = count
+        if valid_from:
+            params['ValidFrom'] = valid_from
+        if valid_until:
+            params['ValidUntil'] = valid_until
+        if launch_group:
+            params['LaunchGroup'] = launch_group
+        if availability_zone_group:
+            params['AvailabilityZoneGroup'] = availability_zone_group
+        if key_name:
+            params['%s.KeyName' % ls] = key_name
+        if security_group_ids:
+            l = []
+            for group in security_group_ids:
+                if isinstance(group, SecurityGroup):
+                    l.append(group.id)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l,
+                                   '%s.SecurityGroupId' % ls)
+        if security_groups:
+            l = []
+            for group in security_groups:
+                if isinstance(group, SecurityGroup):
+                    l.append(group.name)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l, '%s.SecurityGroup' % ls)
+        if user_data:
+            params['%s.UserData' % ls] = base64.b64encode(user_data)
+        if addressing_type:
+            params['%s.AddressingType' % ls] = addressing_type
+        if instance_type:
+            params['%s.InstanceType' % ls] = instance_type
+        if placement:
+            params['%s.Placement.AvailabilityZone' % ls] = placement
+        if kernel_id:
+            params['%s.KernelId' % ls] = kernel_id
+        if ramdisk_id:
+            params['%s.RamdiskId' % ls] = ramdisk_id
+        if monitoring_enabled:
+            params['%s.Monitoring.Enabled' % ls] = 'true'
+        if subnet_id:
+            params['%s.SubnetId' % ls] = subnet_id
+        if placement_group:
+            params['%s.Placement.GroupName' % ls] = placement_group
+        if block_device_map:
+            block_device_map.ec2_build_list_params(params, '%s.' % ls)
+        if instance_profile_name:
+            params['%s.IamInstanceProfile.Name' % ls] = instance_profile_name
+        if instance_profile_arn:
+            params['%s.IamInstanceProfile.Arn' % ls] = instance_profile_arn
+        if ebs_optimized:
+            params['%s.EbsOptimized' % ls] = 'true'
+        if network_interfaces:
+            network_interfaces.build_list_params(params, prefix=ls + '.')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('RequestSpotInstances', params,
+                             [('item', SpotInstanceRequest)],
+                             verb='POST')
+
+    def cancel_spot_instance_requests(self, request_ids, dry_run=False):
+        """
+        Cancel the specified Spot Instance Requests.
+
+        :type request_ids: list
+        :param request_ids: A list of strings of the Request IDs to cancel
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of the spot instance requests cancelled
+        """
+        params = {}
+        if request_ids:
+            self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CancelSpotInstanceRequests', params,
+                             [('item', SpotInstanceRequest)], verb='POST')
+
+    def get_spot_datafeed_subscription(self, dry_run=False):
+        """
+        Return the current spot instance data feed subscription
+        associated with this account, if any.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+ + :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription` + :return: The datafeed subscription object or None + """ + params = {} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeSpotDatafeedSubscription', + params, SpotDatafeedSubscription, verb='POST') + + def create_spot_datafeed_subscription(self, bucket, prefix, dry_run=False): + """ + Create a spot instance datafeed subscription for this account. + + :type bucket: str or unicode + :param bucket: The name of the bucket where spot instance data + will be written. The account issuing this request + must have FULL_CONTROL access to the bucket + specified in the request. + + :type prefix: str or unicode + :param prefix: An optional prefix that will be pre-pended to all + data files written to the bucket. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription` + :return: The datafeed subscription object or None + """ + params = {'Bucket': bucket} + if prefix: + params['Prefix'] = prefix + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateSpotDatafeedSubscription', + params, SpotDatafeedSubscription, verb='POST') + + def delete_spot_datafeed_subscription(self, dry_run=False): + """ + Delete the current spot instance data feed subscription + associated with this account + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteSpotDatafeedSubscription', + params, verb='POST') + + # Zone methods + + def get_all_zones(self, zones=None, filters=None, dry_run=False): + """ + Get all Availability Zones associated with the current region. + + :type zones: list + :param zones: Optional list of zones. If this list is present, + only the Zones associated with these zone names + will be returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list of :class:`boto.ec2.zone.Zone` + :return: The requested Zone objects + """ + params = {} + if zones: + self.build_list_params(params, zones, 'ZoneName') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeAvailabilityZones', params, + [('item', Zone)], verb='POST') + + # Address methods + + def get_all_addresses(self, addresses=None, filters=None, + allocation_ids=None, dry_run=False): + """ + Get all EIP's associated with the current credentials. + + :type addresses: list + :param addresses: Optional list of addresses. If this list is present, + only the Addresses associated with these addresses + will be returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. 
Check the EC2 API guide + for details. + + :type allocation_ids: list + :param allocation_ids: Optional list of allocation IDs. If this list is + present, only the Addresses associated with the given + allocation IDs will be returned. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list of :class:`boto.ec2.address.Address` + :return: The requested Address objects + """ + params = {} + if addresses: + self.build_list_params(params, addresses, 'PublicIp') + if allocation_ids: + self.build_list_params(params, allocation_ids, 'AllocationId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST') + + def allocate_address(self, domain=None, dry_run=False): + """ + Allocate a new Elastic IP address and associate it with your account. + + :type domain: string + :param domain: Optional string. If domain is set to "vpc" the address + will be allocated to VPC . Will return address object with + allocation_id. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.address.Address` + :return: The newly allocated Address + """ + params = {} + + if domain is not None: + params['Domain'] = domain + + if dry_run: + params['DryRun'] = 'true' + + return self.get_object('AllocateAddress', params, Address, verb='POST') + + def assign_private_ip_addresses(self, network_interface_id=None, + private_ip_addresses=None, + secondary_private_ip_address_count=None, + allow_reassignment=False, dry_run=False): + """ + Assigns one or more secondary private IP addresses to a network + interface in Amazon VPC. + + :type network_interface_id: string + :param network_interface_id: The network interface to which the IP + address will be assigned. + + :type private_ip_addresses: list + :param private_ip_addresses: Assigns the specified IP addresses as + secondary IP addresses to the network interface. + + :type secondary_private_ip_address_count: int + :param secondary_private_ip_address_count: The number of secondary IP + addresses to assign to the network interface. You cannot specify + this parameter when also specifying private_ip_addresses. + + :type allow_reassignment: bool + :param allow_reassignment: Specifies whether to allow an IP address + that is already assigned to another network interface or instance + to be reassigned to the specified network interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
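+
+        Example (illustrative; the interface ID is a placeholder)::
+
+            conn.assign_private_ip_addresses(
+                network_interface_id='eni-12345678',
+                secondary_private_ip_address_count=2)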
+ + :rtype: bool + :return: True if successful + """ + params = {} + + if network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + if private_ip_addresses is not None: + self.build_list_params(params, private_ip_addresses, + 'PrivateIpAddress') + elif secondary_private_ip_address_count is not None: + params['SecondaryPrivateIpAddressCount'] = \ + secondary_private_ip_address_count + + if allow_reassignment: + params['AllowReassignment'] = 'true' + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('AssignPrivateIpAddresses', params, verb='POST') + + def _associate_address(self, status, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + elif network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + # Allocation id trumps public ip in order to associate with VPCs + if allocation_id is not None: + params['AllocationId'] = allocation_id + elif public_ip is not None: + params['PublicIp'] = public_ip + + if private_ip_address is not None: + params['PrivateIpAddress'] = private_ip_address + + if allow_reassociation: + params['AllowReassociation'] = 'true' + + if dry_run: + params['DryRun'] = 'true' + + if status: + return self.get_status('AssociateAddress', params, verb='POST') + else: + return self.get_object('AssociateAddress', params, Address, + verb='POST') + + def associate_address(self, instance_id=None, public_ip=None, + allocation_id=None, network_interface_id=None, + private_ip_address=None, allow_reassociation=False, + dry_run=False): + """ + Associate an Elastic IP address with a currently running instance. + This requires one of ``public_ip`` or ``allocation_id`` depending + on if you're associating a VPC address or a plain EC2 address. + + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. + + :type instance_id: string + :param instance_id: The ID of the instance + + :type public_ip: string + :param public_ip: The public IP address for EC2 based allocations. + + :type allocation_id: string + :param allocation_id: The allocation ID for a VPC-based elastic IP. + + :type network_interface_id: string + :param network_interface_id: The network interface ID to which + elastic IP is to be assigned to + + :type private_ip_address: string + :param private_ip_address: The primary or secondary private IP address + to associate with the Elastic IP address. + + :type allow_reassociation: bool + :param allow_reassociation: Specify this option to allow an Elastic IP + address that is already associated with another network interface + or instance to be re-associated with the specified instance or + interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
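+
+        Example (an illustrative sketch; both IDs and the address are
+        placeholders, and one of ``public_ip`` or ``allocation_id`` is
+        used depending on EC2-Classic versus VPC)::
+
+            # EC2-Classic: associate by public IP
+            conn.associate_address(instance_id='i-12345678',
+                                   public_ip='192.0.2.1')
+            # VPC: associate by allocation ID
+            conn.associate_address(instance_id='i-12345678',
+                                   allocation_id='eipalloc-12345678')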
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self._associate_address(True, instance_id=instance_id,
+            public_ip=public_ip, allocation_id=allocation_id,
+            network_interface_id=network_interface_id,
+            private_ip_address=private_ip_address,
+            allow_reassociation=allow_reassociation, dry_run=dry_run)
+
+    def associate_address_object(self, instance_id=None, public_ip=None,
+                                 allocation_id=None, network_interface_id=None,
+                                 private_ip_address=None, allow_reassociation=False,
+                                 dry_run=False):
+        """
+        Associate an Elastic IP address with a currently running instance.
+        This requires one of ``public_ip`` or ``allocation_id`` depending
+        on if you're associating a VPC address or a plain EC2 address.
+
+        When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
+        as EC2 expects a single parameter and if ``public_ip`` is passed boto
+        will preference that instead of ``allocation_id``.
+
+        :type instance_id: string
+        :param instance_id: The ID of the instance
+
+        :type public_ip: string
+        :param public_ip: The public IP address for EC2 based allocations.
+
+        :type allocation_id: string
+        :param allocation_id: The allocation ID for a VPC-based elastic IP.
+
+        :type network_interface_id: string
+        :param network_interface_id: The network interface ID to which
+            elastic IP is to be assigned to
+
+        :type private_ip_address: string
+        :param private_ip_address: The primary or secondary private IP address
+            to associate with the Elastic IP address.
+
+        :type allow_reassociation: bool
+        :param allow_reassociation: Specify this option to allow an Elastic IP
+            address that is already associated with another network interface
+            or instance to be re-associated with the specified instance or
+            interface.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.address.Address`
+        :return: The associated address instance
+        """
+        return self._associate_address(False, instance_id=instance_id,
+            public_ip=public_ip, allocation_id=allocation_id,
+            network_interface_id=network_interface_id,
+            private_ip_address=private_ip_address,
+            allow_reassociation=allow_reassociation, dry_run=dry_run)
+
+    def disassociate_address(self, public_ip=None, association_id=None,
+                             dry_run=False):
+        """
+        Disassociate an Elastic IP address from a currently running instance.
+
+        :type public_ip: string
+        :param public_ip: The public IP address for EC2 elastic IPs.
+
+        :type association_id: string
+        :param association_id: The association ID for a VPC based elastic ip.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {}
+
+        # If there is an association id it trumps public ip
+        # in order to successfully disassociate with a VPC elastic ip
+        if association_id is not None:
+            params['AssociationId'] = association_id
+        elif public_ip is not None:
+            params['PublicIp'] = public_ip
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('DisassociateAddress', params, verb='POST')
+
+    def release_address(self, public_ip=None, allocation_id=None,
+                        dry_run=False):
+        """
+        Free up an Elastic IP address. Pass a public IP address to
+        release an EC2 Elastic IP address and an AllocationId to
+        release a VPC Elastic IP address. You should only pass
+        one value.
+
+        This requires one of ``public_ip`` or ``allocation_id`` depending
+        on if you're associating a VPC address or a plain EC2 address.
+ + When using an Allocation ID, make sure to pass ``None`` for ``public_ip`` + as EC2 expects a single parameter and if ``public_ip`` is passed boto + will preference that instead of ``allocation_id``. + + :type public_ip: string + :param public_ip: The public IP address for EC2 elastic IPs. + + :type allocation_id: string + :param allocation_id: The Allocation ID for VPC elastic IPs. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + + if public_ip is not None: + params['PublicIp'] = public_ip + elif allocation_id is not None: + params['AllocationId'] = allocation_id + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('ReleaseAddress', params, verb='POST') + + def unassign_private_ip_addresses(self, network_interface_id=None, + private_ip_addresses=None, dry_run=False): + """ + Unassigns one or more secondary private IP addresses from a network + interface in Amazon VPC. + + :type network_interface_id: string + :param network_interface_id: The network interface from which the + secondary private IP address will be unassigned. + + :type private_ip_addresses: list + :param private_ip_addresses: Specifies the secondary private IP + addresses that you want to unassign from the network interface. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {} + + if network_interface_id is not None: + params['NetworkInterfaceId'] = network_interface_id + + if private_ip_addresses is not None: + self.build_list_params(params, private_ip_addresses, + 'PrivateIpAddress') + + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('UnassignPrivateIpAddresses', params, + verb='POST') + + # Volume methods + + def get_all_volumes(self, volume_ids=None, filters=None, dry_run=False): + """ + Get all Volumes associated with the current credentials. + + :type volume_ids: list + :param volume_ids: Optional list of volume ids. If this list + is present, only the volumes associated with + these volume ids will be returned. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list of :class:`boto.ec2.volume.Volume` + :return: The requested Volume objects + """ + params = {} + if volume_ids: + self.build_list_params(params, volume_ids, 'VolumeId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVolumes', params, + [('item', Volume)], verb='POST') + + def get_all_volume_status(self, volume_ids=None, + max_results=None, next_token=None, + filters=None, dry_run=False): + """ + Retrieve the status of one or more volumes. + + :type volume_ids: list + :param volume_ids: A list of strings of volume IDs + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :type next_token: str + :param next_token: A string specifying the next paginated set + of results to return. 
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of volume status.
+        """
+        params = {}
+        if volume_ids:
+            self.build_list_params(params, volume_ids, 'VolumeId')
+        if max_results:
+            params['MaxResults'] = max_results
+        if next_token:
+            params['NextToken'] = next_token
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeVolumeStatus', params,
+                               VolumeStatusSet, verb='POST')
+
+    def enable_volume_io(self, volume_id, dry_run=False):
+        """
+        Enables I/O operations for a volume that had I/O operations
+        disabled because the data on the volume was potentially inconsistent.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('EnableVolumeIO', params, verb='POST')
+
+    def get_volume_attribute(self, volume_id,
+                             attribute='autoEnableIO', dry_run=False):
+        """
+        Describes an attribute of the volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume.
+
+        :type attribute: str
+        :param attribute: The requested attribute.  Valid values are:
+
+            * autoEnableIO
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.volume.VolumeAttribute`
+        :return: The requested Volume attribute
+        """
+        params = {'VolumeId': volume_id, 'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeVolumeAttribute', params,
+                               VolumeAttribute, verb='POST')
+
+    def modify_volume_attribute(self, volume_id, attribute, new_value,
+                                dry_run=False):
+        """
+        Changes an attribute of a Volume.
+
+        :type volume_id: string
+        :param volume_id: The volume id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change.  Valid values are:
+            AutoEnableIO.
+
+        :type new_value: string
+        :param new_value: The new value of the attribute.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'VolumeId': volume_id}
+        if attribute == 'AutoEnableIO':
+            params['AutoEnableIO.Value'] = new_value
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifyVolumeAttribute', params, verb='POST')
+
+    def create_volume(self, size, zone, snapshot=None, volume_type=None,
+                      iops=None, encrypted=False, kms_key_id=None, dry_run=False):
+        """
+        Create a new EBS Volume.
+
+        :type size: int
+        :param size: The size of the new volume, in GiB
+
+        :type zone: string or :class:`boto.ec2.zone.Zone`
+        :param zone: The availability zone in which the Volume will be created.
+
+        :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
+        :param snapshot: The snapshot from which the new Volume will be
+            created.
+
+        :type volume_type: string
+        :param volume_type: The type of the volume. (optional).  Valid
+            values are: standard | io1 | gp2.
+
+        :type iops: int
+        :param iops: The provisioned IOPS you want to associate with
+            this volume. (optional)
+
+        :type encrypted: bool
+        :param encrypted: Specifies whether the volume should be encrypted.
+            (optional)
+
+        :type kms_key_id: string
+        :param kms_key_id: If encrypted is True, this KMS Key ID may be specified to
+            encrypt volume with this key (optional)
+            e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        if isinstance(zone, Zone):
+            zone = zone.name
+        params = {'AvailabilityZone': zone}
+        if size:
+            params['Size'] = size
+        if snapshot:
+            if isinstance(snapshot, Snapshot):
+                snapshot = snapshot.id
+            params['SnapshotId'] = snapshot
+        if volume_type:
+            params['VolumeType'] = volume_type
+        if iops:
+            params['Iops'] = str(iops)
+        if encrypted:
+            params['Encrypted'] = 'true'
+            if kms_key_id:
+                params['KmsKeyId'] = kms_key_id
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVolume', params, Volume, verb='POST')
+
+    def delete_volume(self, volume_id, dry_run=False):
+        """
+        Delete an EBS volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVolume', params, verb='POST')
+
+    def attach_volume(self, volume_id, instance_id, device, dry_run=False):
+        """
+        Attach an EBS volume to an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be attached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance to which it will
+            be attached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+            volume will be exposed (e.g. /dev/sdh)
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'InstanceId': instance_id,
+                  'VolumeId': volume_id,
+                  'Device': device}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AttachVolume', params, verb='POST')
+
+    def detach_volume(self, volume_id, instance_id=None,
+                      device=None, force=False, dry_run=False):
+        """
+        Detach an EBS volume from an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be detached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance from which it will
+            be detached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+            volume is exposed (e.g. /dev/sdh)
+
+        :type force: bool
+        :param force: Forces detachment if the previous detachment
+            attempt did not occur cleanly.  This option can lead to
+            data loss or a corrupted file system.  Use this option only
+            as a last resort to detach a volume from a failed
+            instance.  The instance will not have an opportunity to
+            flush file system caches nor file system metadata.  If you
+            use this option, you must perform file system check and
+            repair procedures.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
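+
+        A hedged usage sketch, assuming a connection ``conn`` and
+        illustrative volume/instance IDs::
+
+            conn.detach_volume('vol-12345678', instance_id='i-12345678',
+                               device='/dev/sdh')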
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        if instance_id:
+            params['InstanceId'] = instance_id
+        if device:
+            params['Device'] = device
+        if force:
+            params['Force'] = 'true'
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DetachVolume', params, verb='POST')
+
+    # Snapshot methods
+
+    def get_all_snapshots(self, snapshot_ids=None,
+                          owner=None, restorable_by=None,
+                          filters=None, dry_run=False):
+        """
+        Get all EBS Snapshots associated with the current credentials.
+
+        :type snapshot_ids: list
+        :param snapshot_ids: Optional list of snapshot ids.  If this list is
+            present, only the Snapshots associated with
+            these snapshot ids will be returned.
+
+        :type owner: str or list
+        :param owner: If present, only the snapshots owned by the specified user(s)
+            will be returned.  Valid values are:
+
+            * self
+            * amazon
+            * AWS Account ID
+
+        :type restorable_by: str or list
+        :param restorable_by: If present, only the snapshots that are restorable
+            by the specified account id(s) will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.snapshot.Snapshot`
+        :return: The requested Snapshot objects
+        """
+        params = {}
+        if snapshot_ids:
+            self.build_list_params(params, snapshot_ids, 'SnapshotId')
+
+        if owner:
+            self.build_list_params(params, owner, 'Owner')
+        if restorable_by:
+            self.build_list_params(params, restorable_by, 'RestorableBy')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSnapshots', params,
+                             [('item', Snapshot)], verb='POST')
+
+    def create_snapshot(self, volume_id, description=None, dry_run=False):
+        """
+        Create a snapshot of an existing EBS Volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be snapshotted
+
+        :type description: str
+        :param description: A description of the snapshot.
+            Limited to 255 characters.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.snapshot.Snapshot`
+        :return: The created Snapshot object
+        """
+        params = {'VolumeId': volume_id}
+        if description:
+            params['Description'] = description[0:255]
+        if dry_run:
+            params['DryRun'] = 'true'
+        snapshot = self.get_object('CreateSnapshot', params,
+                                   Snapshot, verb='POST')
+        volume = self.get_all_volumes([volume_id], dry_run=dry_run)[0]
+        volume_name = volume.tags.get('Name')
+        if volume_name:
+            snapshot.add_tag('Name', volume_name)
+        return snapshot
+
+    def delete_snapshot(self, snapshot_id, dry_run=False):
+        """
+        Delete an EBS snapshot.
+
+        :type snapshot_id: str
+        :param snapshot_id: The ID of the snapshot to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'SnapshotId': snapshot_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteSnapshot', params, verb='POST')
+
+    def copy_snapshot(self, source_region, source_snapshot_id,
+                      description=None, dry_run=False):
+        """
+        Copies a point-in-time snapshot of an Amazon Elastic Block Store
+        (Amazon EBS) volume and stores it in Amazon Simple Storage Service
+        (Amazon S3).
You can copy the snapshot within the same region or from + one region to another. You can use the snapshot to create new Amazon + EBS volumes or Amazon Machine Images (AMIs). + + + :type source_region: str + :param source_region: The ID of the AWS region that contains the + snapshot to be copied (e.g 'us-east-1', 'us-west-2', etc.). + + :type source_snapshot_id: str + :param source_snapshot_id: The ID of the Amazon EBS snapshot to copy + + :type description: str + :param description: A description of the new Amazon EBS snapshot. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: The snapshot ID + + """ + params = { + 'SourceRegion': source_region, + 'SourceSnapshotId': source_snapshot_id, + } + if description is not None: + params['Description'] = description + if dry_run: + params['DryRun'] = 'true' + snapshot = self.get_object('CopySnapshot', params, Snapshot, + verb='POST') + return snapshot.id + + def trim_snapshots(self, hourly_backups=8, daily_backups=7, + weekly_backups=4, monthly_backups=True): + """ + Trim excess snapshots, based on when they were taken. More current + snapshots are retained, with the number retained decreasing as you + move back in time. + + If ebs volumes have a 'Name' tag with a value, their snapshots + will be assigned the same tag when they are created. The values + of the 'Name' tags for snapshots are used by this function to + group snapshots taken from the same volume (or from a series + of like-named volumes over time) for trimming. + + For every group of like-named snapshots, this function retains + the newest and oldest snapshots, as well as, by default, the + first snapshots taken in each of the last eight hours, the first + snapshots taken in each of the last seven days, the first snapshots + taken in the last 4 weeks (counting Midnight Sunday morning as + the start of the week), and the first snapshot from the first + day of each month forever. + + :type hourly_backups: int + :param hourly_backups: How many recent hourly backups should be saved. + + :type daily_backups: int + :param daily_backups: How many recent daily backups should be saved. + + :type weekly_backups: int + :param weekly_backups: How many recent weekly backups should be saved. + + :type monthly_backups: int + :param monthly_backups: How many monthly backups should be saved. Use True for no limit. + """ + + # This function first builds up an ordered list of target times + # that snapshots should be saved for (last 8 hours, last 7 days, etc.). + # Then a map of snapshots is constructed, with the keys being + # the snapshot / volume names and the values being arrays of + # chronologically sorted snapshots. + # Finally, for each array in the map, we go through the snapshot + # array and the target time array in an interleaved fashion, + # deleting snapshots whose start_times don't immediately follow a + # target time (we delete a snapshot if there's another snapshot + # that was made closer to the preceding target time). 
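+
+        # A hedged usage sketch (assuming a connection ``conn``); a nightly
+        # maintenance job might simply call:
+        #
+        #     conn.trim_snapshots(hourly_backups=8, daily_backups=7,
+        #                         weekly_backups=4)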
+
+        now = datetime.utcnow()
+        last_hour = datetime(now.year, now.month, now.day, now.hour)
+        last_midnight = datetime(now.year, now.month, now.day)
+        last_sunday = datetime(now.year, now.month, now.day) - timedelta(days=(now.weekday() + 1) % 7)
+        start_of_month = datetime(now.year, now.month, 1)
+
+        target_backup_times = []
+
+        # there are no snapshots older than 1/1/2007
+        oldest_snapshot_date = datetime(2007, 1, 1)
+
+        for hour in range(0, hourly_backups):
+            target_backup_times.append(last_hour - timedelta(hours=hour))
+
+        for day in range(0, daily_backups):
+            target_backup_times.append(last_midnight - timedelta(days=day))
+
+        for week in range(0, weekly_backups):
+            target_backup_times.append(last_sunday - timedelta(weeks=week))
+
+        one_day = timedelta(days=1)
+        monthly_snapshots_added = 0
+        while (start_of_month > oldest_snapshot_date and
+               (monthly_backups is True or
+                monthly_snapshots_added < monthly_backups)):
+            # append the start of the month to the list of
+            # snapshot dates to save:
+            target_backup_times.append(start_of_month)
+            monthly_snapshots_added += 1
+            # there's no timedelta setting for one month, so instead:
+            # decrement the day by one, so we go to the final day of
+            # the previous month...
+            start_of_month -= one_day
+            # ... and then go to the first day of that previous month:
+            start_of_month = datetime(start_of_month.year,
+                                      start_of_month.month, 1)
+
+        temp = []
+
+        for t in target_backup_times:
+            if t not in temp:
+                temp.append(t)
+
+        # sort to make the oldest dates first, and make sure the month start
+        # and last four week's start are in the proper order
+        target_backup_times = sorted(temp)
+
+        # get all the snapshots, sort them by date and time, and
+        # organize them into one array for each volume:
+        all_snapshots = self.get_all_snapshots(owner='self')
+        all_snapshots.sort(key=lambda x: x.start_time)
+        snaps_for_each_volume = {}
+        for snap in all_snapshots:
+            # the snapshot name and the volume name are the same.
+            # The snapshot name is set from the volume
+            # name at the time the snapshot is taken
+            volume_name = snap.tags.get('Name')
+            if volume_name:
+                # only examine snapshots that have a volume name
+                snaps_for_volume = snaps_for_each_volume.get(volume_name)
+                if not snaps_for_volume:
+                    snaps_for_volume = []
+                    snaps_for_each_volume[volume_name] = snaps_for_volume
+                snaps_for_volume.append(snap)
+
+        # Do a running comparison of snapshot dates to desired time
+        # periods, keeping the oldest snapshot in each
+        # time period and deleting the rest:
+        for volume_name in snaps_for_each_volume:
+            snaps = snaps_for_each_volume[volume_name]
+            snaps = snaps[:-1]  # never delete the newest snapshot
+            time_period_number = 0
+            snap_found_for_this_time_period = False
+            for snap in snaps:
+                check_this_snap = True
+                while check_this_snap and time_period_number < len(target_backup_times):
+                    snap_date = datetime.strptime(snap.start_time,
+                                                  '%Y-%m-%dT%H:%M:%S.000Z')
+                    if snap_date < target_backup_times[time_period_number]:
+                        # the snap date is before the cutoff date.
+                        # Figure out if it's the first snap in this
+                        # date range and act accordingly (since both
+                        # the date ranges and the snapshots are sorted
+                        # chronologically, we know this snapshot isn't
+                        # in an earlier date range):
+                        if snap_found_for_this_time_period:
+                            if not snap.tags.get('preserve_snapshot'):
+                                # as long as the snapshot wasn't marked
+                                # with the 'preserve_snapshot' tag, delete it:
+                                try:
+                                    self.delete_snapshot(snap.id)
+                                    boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
+                                except EC2ResponseError:
+                                    boto.log.error('Attempt to trim snapshot %s (%s) failed.  Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time))
+                            # go on and look at the next snapshot,
+                            # leaving the time period alone
+                        else:
+                            # this was the first snapshot found for this
+                            # time period.  Leave it alone and look at the
+                            # next snapshot:
+                            snap_found_for_this_time_period = True
+                        check_this_snap = False
+                    else:
+                        # the snap is after the cutoff date.  Check it
+                        # against the next cutoff date
+                        time_period_number += 1
+                        snap_found_for_this_time_period = False
+
+    def get_snapshot_attribute(self, snapshot_id,
+                               attribute='createVolumePermission',
+                               dry_run=False):
+        """
+        Get information about an attribute of a snapshot.  Only one attribute
+        can be specified per call.
+
+        :type snapshot_id: str
+        :param snapshot_id: The ID of the snapshot.
+
+        :type attribute: str
+        :param attribute: The requested attribute.  Valid values are:
+
+            * createVolumePermission
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
+        :return: The requested Snapshot attribute
+        """
+        params = {'Attribute': attribute}
+        if snapshot_id:
+            params['SnapshotId'] = snapshot_id
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('DescribeSnapshotAttribute', params,
+                               SnapshotAttribute, verb='POST')
+
+    def modify_snapshot_attribute(self, snapshot_id,
+                                  attribute='createVolumePermission',
+                                  operation='add', user_ids=None, groups=None,
+                                  dry_run=False):
+        """
+        Changes an attribute of a snapshot.
+
+        :type snapshot_id: string
+        :param snapshot_id: The snapshot id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change.  Valid values are:
+            createVolumePermission
+
+        :type operation: string
+        :param operation: Either add or remove (this is required for changing
+            snapshot permissions)
+
+        :type user_ids: list
+        :param user_ids: The Amazon IDs of users to add/remove attributes
+
+        :type groups: list
+        :param groups: The groups to add/remove attributes.  The only valid
+            value at this time is 'all'.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'SnapshotId': snapshot_id,
+                  'Attribute': attribute,
+                  'OperationType': operation}
+        if user_ids:
+            self.build_list_params(params, user_ids, 'UserId')
+        if groups:
+            self.build_list_params(params, groups, 'UserGroup')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ModifySnapshotAttribute', params, verb='POST')
+
+    def reset_snapshot_attribute(self, snapshot_id,
+                                 attribute='createVolumePermission',
+                                 dry_run=False):
+        """
+        Resets an attribute of a snapshot to its default value.
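+
+        A hedged sketch, assuming a connection ``conn`` and an
+        illustrative snapshot ID::
+
+            conn.reset_snapshot_attribute('snap-12345678')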
+
+        :type snapshot_id: string
+        :param snapshot_id: ID of the snapshot
+
+        :type attribute: string
+        :param attribute: The attribute to reset
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'SnapshotId': snapshot_id,
+                  'Attribute': attribute}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('ResetSnapshotAttribute', params, verb='POST')
+
+    # Keypair methods
+
+    def get_all_key_pairs(self, keynames=None, filters=None, dry_run=False):
+        """
+        Get all key pairs associated with your account.
+
+        :type keynames: list
+        :param keynames: A list of the names of keypairs to retrieve.
+            If not provided, all key pairs will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned.  Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value.  The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.keypair.KeyPair`
+        """
+        params = {}
+        if keynames:
+            self.build_list_params(params, keynames, 'KeyName')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeKeyPairs', params,
+                             [('item', KeyPair)], verb='POST')
+
+    def get_key_pair(self, keyname, dry_run=False):
+        """
+        Convenience method to retrieve a specific keypair (KeyPair).
+
+        :type keyname: string
+        :param keyname: The name of the keypair to retrieve
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The KeyPair specified or None if it is not found
+        """
+        try:
+            return self.get_all_key_pairs(
+                keynames=[keyname],
+                dry_run=dry_run
+            )[0]
+        except self.ResponseError as e:
+            if e.code == 'InvalidKeyPair.NotFound':
+                return None
+            else:
+                raise
+
+    def create_key_pair(self, key_name, dry_run=False):
+        """
+        Create a new key pair for your account.
+        This will create the key pair within the region you
+        are currently connected to.
+
+        :type key_name: string
+        :param key_name: The name of the new keypair
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
+            The material attribute of the new KeyPair object
+            will contain the unencrypted PEM encoded RSA private key.
+        """
+        params = {'KeyName': key_name}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
+
+    def delete_key_pair(self, key_name, dry_run=False):
+        """
+        Delete a key pair from your account.
+
+        :type key_name: string
+        :param key_name: The name of the keypair to delete
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'KeyName': key_name}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteKeyPair', params, verb='POST')
+
+    def import_key_pair(self, key_name, public_key_material, dry_run=False):
+        """
+        Imports the public key from an RSA key pair that you created
+        with a third-party tool.
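+
+        A minimal sketch, assuming a connection ``conn`` and an existing
+        OpenSSH public key on disk (the method base64 encodes the
+        material before sending it, as shown in the body below)::
+
+            with open('/home/user/.ssh/id_rsa.pub') as f:
+                conn.import_key_pair('server-key', f.read())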
+
+        Supported formats:
+
+        * OpenSSH public key format (e.g., the format
+          in ~/.ssh/authorized_keys)
+
+        * Base64 encoded DER format
+
+        * SSH public key file format as specified in RFC4716
+
+        DSA keys are not supported.  Make sure your key generator is
+        set up to create RSA keys.
+
+        Supported lengths: 1024, 2048, and 4096.
+
+        :type key_name: string
+        :param key_name: The name of the new keypair
+
+        :type public_key_material: string
+        :param public_key_material: The public key.  This method base64
+            encodes the public key material before sending it to AWS.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: A :class:`boto.ec2.keypair.KeyPair` object representing
+            the newly imported key pair.  This object will contain only
+            the key name and the fingerprint.
+        """
+        public_key_material = base64.b64encode(public_key_material)
+        params = {'KeyName': key_name,
+                  'PublicKeyMaterial': public_key_material}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')
+
+    # SecurityGroup methods
+
+    def get_all_security_groups(self, groupnames=None, group_ids=None,
+                                filters=None, dry_run=False):
+        """
+        Get all security groups associated with your account in a region.
+
+        :type groupnames: list
+        :param groupnames: A list of the names of security groups to retrieve.
+            If not provided, all security groups will be returned.
+
+        :type group_ids: list
+        :param group_ids: A list of IDs of security groups to retrieve for
+            security groups within a VPC.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.securitygroup.SecurityGroup`
+        """
+        params = {}
+        if groupnames is not None:
+            self.build_list_params(params, groupnames, 'GroupName')
+        if group_ids is not None:
+            self.build_list_params(params, group_ids, 'GroupId')
+        if filters is not None:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeSecurityGroups', params,
+                             [('item', SecurityGroup)], verb='POST')
+
+    def create_security_group(self, name, description, vpc_id=None,
+                              dry_run=False):
+        """
+        Create a new security group for your account.
+        This will create the security group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new security group
+
+        :type description: string
+        :param description: The description of the new security group
+
+        :type vpc_id: string
+        :param vpc_id: The ID of the VPC to create the security group in,
+            if any.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`.
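+
+        For example, a hedged sketch (assuming a connection ``conn``; the
+        group name and rule are illustrative)::
+
+            web = conn.create_security_group('webservers',
+                                             'Our web server group')
+            web.authorize('tcp', 80, 80, '0.0.0.0/0')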
+        """
+        params = {'GroupName': name,
+                  'GroupDescription': description}
+
+        if vpc_id is not None:
+            params['VpcId'] = vpc_id
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        group = self.get_object('CreateSecurityGroup', params,
+                                SecurityGroup, verb='POST')
+        group.name = name
+        group.description = description
+        if vpc_id is not None:
+            group.vpc_id = vpc_id
+        return group
+
+    def delete_security_group(self, name=None, group_id=None, dry_run=False):
+        """
+        Delete a security group from your account.
+
+        :type name: string
+        :param name: The name of the security group to delete.
+
+        :type group_id: string
+        :param group_id: The ID of the security group to delete within
+            a VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {}
+
+        if name is not None:
+            params['GroupName'] = name
+        elif group_id is not None:
+            params['GroupId'] = group_id
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('DeleteSecurityGroup', params, verb='POST')
+
+    def authorize_security_group_deprecated(self, group_name,
+                                            src_security_group_name=None,
+                                            src_security_group_owner_id=None,
+                                            ip_protocol=None,
+                                            from_port=None, to_port=None,
+                                            cidr_ip=None, dry_run=False):
+        """
+        NOTE: This method uses the old-style request parameters
+        that did not allow a port to be specified when
+        authorizing a group.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are adding
+            the rule to.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            granting access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are granting access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are enabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are enabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are providing access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName': group_name}
+        if src_security_group_name:
+            params['SourceSecurityGroupName'] = src_security_group_name
+        if src_security_group_owner_id:
+            params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpProtocol'] = ip_protocol
+        if from_port:
+            params['FromPort'] = from_port
+        if to_port:
+            params['ToPort'] = to_port
+        if cidr_ip:
+            params['CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AuthorizeSecurityGroupIngress', params)
+
+    def authorize_security_group(self, group_name=None,
+                                 src_security_group_name=None,
+                                 src_security_group_owner_id=None,
+                                 ip_protocol=None,
+                                 from_port=None, to_port=None,
+                                 cidr_ip=None, group_id=None,
+                                 src_security_group_group_id=None,
+                                 dry_run=False):
+        """
+        Add a new rule to an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip.  In other words, either you are authorizing another
+        group or you are authorizing some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are adding
+            the rule to.
+ + :type src_security_group_name: string + :param src_security_group_name: The name of the security group you are + granting access to. + + :type src_security_group_owner_id: string + :param src_security_group_owner_id: The ID of the owner of the security + group you are granting access to. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type cidr_ip: string or list of strings + :param cidr_ip: The CIDR block you are providing access to. + See http://goo.gl/Yj5QC + + :type group_id: string + :param group_id: ID of the EC2 or VPC security group to + modify. This is required for VPC security groups and can + be used instead of group_name for EC2 security groups. + + :type src_security_group_group_id: string + :param src_security_group_group_id: The ID of the security + group you are granting access to. Can be used instead of + src_security_group_name + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful. + """ + if src_security_group_name: + if from_port is None and to_port is None and ip_protocol is None: + return self.authorize_security_group_deprecated( + group_name, src_security_group_name, + src_security_group_owner_id) + + params = {} + + if group_name: + params['GroupName'] = group_name + if group_id: + params['GroupId'] = group_id + if src_security_group_name: + param_name = 'IpPermissions.1.Groups.1.GroupName' + params[param_name] = src_security_group_name + if src_security_group_owner_id: + param_name = 'IpPermissions.1.Groups.1.UserId' + params[param_name] = src_security_group_owner_id + if src_security_group_group_id: + param_name = 'IpPermissions.1.Groups.1.GroupId' + params[param_name] = src_security_group_group_id + if ip_protocol: + params['IpPermissions.1.IpProtocol'] = ip_protocol + if from_port is not None: + params['IpPermissions.1.FromPort'] = from_port + if to_port is not None: + params['IpPermissions.1.ToPort'] = to_port + if cidr_ip: + if not isinstance(cidr_ip, list): + cidr_ip = [cidr_ip] + for i, single_cidr_ip in enumerate(cidr_ip): + params['IpPermissions.1.IpRanges.%d.CidrIp' % (i + 1)] = \ + single_cidr_ip + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('AuthorizeSecurityGroupIngress', + params, verb='POST') + + def authorize_security_group_egress(self, + group_id, + ip_protocol, + from_port=None, + to_port=None, + src_group_id=None, + cidr_ip=None, + dry_run=False): + """ + The action adds one or more egress rules to a VPC security + group. Specifically, this action permits instances in a + security group to send traffic to one or more destination + CIDR IP address ranges, or to one or more destination + security groups in the same VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
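+
+        A hedged sketch, assuming a connection ``conn`` and an illustrative
+        VPC security group ID::
+
+            conn.authorize_security_group_egress('sg-12345678', 'tcp',
+                                                 from_port=443, to_port=443,
+                                                 cidr_ip='0.0.0.0/0')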
+
+        """
+        params = {
+            'GroupId': group_id,
+            'IpPermissions.1.IpProtocol': ip_protocol
+        }
+
+        if from_port is not None:
+            params['IpPermissions.1.FromPort'] = from_port
+        if to_port is not None:
+            params['IpPermissions.1.ToPort'] = to_port
+        if src_group_id is not None:
+            params['IpPermissions.1.Groups.1.GroupId'] = src_group_id
+        if cidr_ip is not None:
+            params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_status('AuthorizeSecurityGroupEgress',
+                               params, verb='POST')
+
+    def revoke_security_group_deprecated(self, group_name,
+                                         src_security_group_name=None,
+                                         src_security_group_owner_id=None,
+                                         ip_protocol=None,
+                                         from_port=None, to_port=None,
+                                         cidr_ip=None, dry_run=False):
+        """
+        NOTE: This method uses the old-style request parameters
+        that did not allow a port to be specified when
+        authorizing a group.
+
+        Remove an existing rule from an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip.  In other words, either you are revoking another
+        group or you are revoking some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are removing
+            the rule from.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            revoking access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are revoking access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName': group_name}
+        if src_security_group_name:
+            params['SourceSecurityGroupName'] = src_security_group_name
+        if src_security_group_owner_id:
+            params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpProtocol'] = ip_protocol
+        if from_port:
+            params['FromPort'] = from_port
+        if to_port:
+            params['ToPort'] = to_port
+        if cidr_ip:
+            params['CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RevokeSecurityGroupIngress', params)
+
+    def revoke_security_group(self, group_name=None,
+                              src_security_group_name=None,
+                              src_security_group_owner_id=None,
+                              ip_protocol=None, from_port=None, to_port=None,
+                              cidr_ip=None, group_id=None,
+                              src_security_group_group_id=None, dry_run=False):
+        """
+        Remove an existing rule from an existing security group.
+        You need to pass in either src_security_group_name and
+        src_security_group_owner_id OR ip_protocol, from_port, to_port,
+        and cidr_ip.  In other words, either you are revoking another
+        group or you are revoking some ip-based rule.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are removing
+            the rule from.
+
+        :type src_security_group_name: string
+        :param src_security_group_name: The name of the security group you are
+            revoking access to.
+
+        :type src_security_group_owner_id: string
+        :param src_security_group_owner_id: The ID of the owner of the security
+            group you are revoking access to.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type group_id: string
+        :param group_id: ID of the EC2 or VPC security group to
+            modify.  This is required for VPC security groups and can
+            be used instead of group_name for EC2 security groups.
+
+        :type src_security_group_group_id: string
+        :param src_security_group_group_id: The ID of the security group
+            for which you are revoking access.  Can be used instead
+            of src_security_group_name
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if src_security_group_name:
+            if from_port is None and to_port is None and ip_protocol is None:
+                return self.revoke_security_group_deprecated(
+                    group_name, src_security_group_name,
+                    src_security_group_owner_id)
+        params = {}
+        if group_name is not None:
+            params['GroupName'] = group_name
+        if group_id is not None:
+            params['GroupId'] = group_id
+        if src_security_group_name:
+            param_name = 'IpPermissions.1.Groups.1.GroupName'
+            params[param_name] = src_security_group_name
+        if src_security_group_group_id:
+            param_name = 'IpPermissions.1.Groups.1.GroupId'
+            params[param_name] = src_security_group_group_id
+        if src_security_group_owner_id:
+            param_name = 'IpPermissions.1.Groups.1.UserId'
+            params[param_name] = src_security_group_owner_id
+        if ip_protocol:
+            params['IpPermissions.1.IpProtocol'] = ip_protocol
+        if from_port is not None:
+            params['IpPermissions.1.FromPort'] = from_port
+        if to_port is not None:
+            params['IpPermissions.1.ToPort'] = to_port
+        if cidr_ip:
+            params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RevokeSecurityGroupIngress',
+                               params, verb='POST')
+
+    def revoke_security_group_egress(self,
+                                     group_id,
+                                     ip_protocol,
+                                     from_port=None,
+                                     to_port=None,
+                                     src_group_id=None,
+                                     cidr_ip=None, dry_run=False):
+        """
+        Remove an existing egress rule from an existing VPC security
+        group.  You need to pass in an ip_protocol, from_port and
+        to_port range only if the protocol you are using is
+        port-based.  You also need to pass in either a src_group_id or
+        cidr_ip.
+
+        :type group_id: string
+        :param group_id: The ID of the security group you are removing
+            the rule from.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp | -1
+
+        :type from_port: int
+        :param from_port: The beginning port number you are disabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are disabling
+
+        :type src_group_id: string
+        :param src_group_id: The source security group you are
+            revoking access to.
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are revoking access to.
+            See http://goo.gl/Yj5QC
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful.
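+
+        A hedged sketch mirroring the authorize call above, assuming a
+        connection ``conn`` and an illustrative group ID::
+
+            conn.revoke_security_group_egress('sg-12345678', 'tcp',
+                                              from_port=443, to_port=443,
+                                              cidr_ip='0.0.0.0/0')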
+
+        """
+        params = {}
+        if group_id:
+            params['GroupId'] = group_id
+        if ip_protocol:
+            params['IpPermissions.1.IpProtocol'] = ip_protocol
+        if from_port is not None:
+            params['IpPermissions.1.FromPort'] = from_port
+        if to_port is not None:
+            params['IpPermissions.1.ToPort'] = to_port
+        if src_group_id is not None:
+            params['IpPermissions.1.Groups.1.GroupId'] = src_group_id
+        if cidr_ip:
+            params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RevokeSecurityGroupEgress',
+                               params, verb='POST')
+
+    #
+    # Regions
+    #
+
+    def get_all_regions(self, region_names=None, filters=None, dry_run=False):
+        """
+        Get all available regions for the EC2 service.
+
+        :type region_names: list of str
+        :param region_names: Names of regions to limit output
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
+        """
+        params = {}
+        if region_names:
+            self.build_list_params(params, region_names, 'RegionName')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        regions = self.get_list('DescribeRegions', params,
+                                [('item', RegionInfo)], verb='POST')
+        for region in regions:
+            region.connection_cls = EC2Connection
+        return regions
+
+    #
+    # Reservation methods
+    #
+
+    def get_all_reserved_instances_offerings(self,
+                                             reserved_instances_offering_ids=None,
+                                             instance_type=None,
+                                             availability_zone=None,
+                                             product_description=None,
+                                             filters=None,
+                                             instance_tenancy=None,
+                                             offering_type=None,
+                                             include_marketplace=None,
+                                             min_duration=None,
+                                             max_duration=None,
+                                             max_instance_count=None,
+                                             next_token=None,
+                                             max_results=None,
+                                             dry_run=False):
+        """
+        Describes Reserved Instance offerings that are available for purchase.
+
+        :type reserved_instances_offering_ids: list
+        :param reserved_instances_offering_ids: One or more Reserved Instances
+            offering IDs.
+
+        :type instance_type: str
+        :param instance_type: Displays Reserved Instances of the specified
+            instance type.
+
+        :type availability_zone: str
+        :param availability_zone: Displays Reserved Instances within the
+            specified Availability Zone.
+
+        :type product_description: str
+        :param product_description: Displays Reserved Instances with the
+            specified product description.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :type instance_tenancy: string
+        :param instance_tenancy: The tenancy of the Reserved Instance offering.
+            A Reserved Instance with tenancy of dedicated will run on
+            single-tenant hardware and can only be launched within a VPC.
+
+        :type offering_type: string
+        :param offering_type: The Reserved Instance offering type.  Valid
+            Values: `"Heavy Utilization" | "Medium Utilization" | "Light
+            Utilization"`
+
+        :type include_marketplace: bool
+        :param include_marketplace: Include Marketplace offerings in the
+            response.
+
+        :type min_duration: int
+        :param min_duration: Minimum duration (in seconds) to filter when
+            searching for offerings.
+
+        :type max_duration: int
+        :param max_duration: Maximum duration (in seconds) to filter when
+            searching for offerings.
+
+        :type max_instance_count: int
+        :param max_instance_count: Maximum number of instances to filter when
+            searching for offerings.
+
+        :type next_token: string
+        :param next_token: Token to use when requesting the next paginated set
+            of offerings.
+
+        :type max_results: int
+        :param max_results: Maximum number of offerings to return per call.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`.
+
+        """
+        params = {}
+        if reserved_instances_offering_ids is not None:
+            self.build_list_params(params, reserved_instances_offering_ids,
+                                   'ReservedInstancesOfferingId')
+        if instance_type:
+            params['InstanceType'] = instance_type
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if product_description:
+            params['ProductDescription'] = product_description
+        if filters:
+            self.build_filter_params(params, filters)
+        if instance_tenancy is not None:
+            params['InstanceTenancy'] = instance_tenancy
+        if offering_type is not None:
+            params['OfferingType'] = offering_type
+        if include_marketplace is not None:
+            if include_marketplace:
+                params['IncludeMarketplace'] = 'true'
+            else:
+                params['IncludeMarketplace'] = 'false'
+        if min_duration is not None:
+            params['MinDuration'] = str(min_duration)
+        if max_duration is not None:
+            params['MaxDuration'] = str(max_duration)
+        if max_instance_count is not None:
+            params['MaxInstanceCount'] = str(max_instance_count)
+        if next_token is not None:
+            params['NextToken'] = next_token
+        if max_results is not None:
+            params['MaxResults'] = str(max_results)
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_list('DescribeReservedInstancesOfferings',
+                             params, [('item', ReservedInstancesOffering)],
+                             verb='POST')
+
+    def get_all_reserved_instances(self, reserved_instances_id=None,
+                                   filters=None, dry_run=False):
+        """
+        Describes one or more of the Reserved Instances that you purchased.
+
+        :type reserved_instances_id: list
+        :param reserved_instances_id: A list of the reserved instance ids that
+            will be returned.  If not provided, all reserved instances
+            will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned.  Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value.  The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
+        """
+        params = {}
+        if reserved_instances_id:
+            self.build_list_params(params, reserved_instances_id,
+                                   'ReservedInstancesId')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeReservedInstances',
+                             params, [('item', ReservedInstance)], verb='POST')
+
+    def purchase_reserved_instance_offering(self,
+                                            reserved_instances_offering_id,
+                                            instance_count=1, limit_price=None,
+                                            dry_run=False):
+        """
+        Purchase a Reserved Instance for use with your account.
+        ** CAUTION **
+        This request can result in large amounts of money being charged to your
+        AWS account.  Use with caution!
+
+        :type reserved_instances_offering_id: string
+        :param reserved_instances_offering_id: The offering ID of the Reserved
+            Instance to purchase
+
+        :type instance_count: int
+        :param instance_count: The number of Reserved Instances to purchase.
+            Default value is 1.
+
+        :type limit_price: tuple
+        :param limit_price: Limit the price on the total order.
+            Must be a tuple of (amount, currency_code), for example:
+            (100.0, 'USD').
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: :class:`boto.ec2.reservedinstance.ReservedInstance`
+        :return: The newly created Reserved Instance
+        """
+        params = {
+            'ReservedInstancesOfferingId': reserved_instances_offering_id,
+            'InstanceCount': instance_count}
+        if limit_price is not None:
+            params['LimitPrice.Amount'] = str(limit_price[0])
+            params['LimitPrice.CurrencyCode'] = str(limit_price[1])
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('PurchaseReservedInstancesOffering', params,
+                               ReservedInstance, verb='POST')
+
+    def create_reserved_instances_listing(self, reserved_instances_id,
+                                          instance_count, price_schedules,
+                                          client_token, dry_run=False):
+        """Creates a new listing for Reserved Instances.
+
+        Creates a new listing for Amazon EC2 Reserved Instances that will be
+        sold in the Reserved Instance Marketplace.  You can submit one Reserved
+        Instance listing at a time.
+
+        The Reserved Instance Marketplace matches sellers who want to resell
+        Reserved Instance capacity that they no longer need with buyers who
+        want to purchase additional capacity.  Reserved Instances bought and
+        sold through the Reserved Instance Marketplace work like any other
+        Reserved Instances.
+
+        If you want to sell your Reserved Instances, you must first register as
+        a Seller in the Reserved Instance Marketplace.  After completing the
+        registration process, you can create a Reserved Instance Marketplace
+        listing of some or all of your Reserved Instances, and specify the
+        upfront price you want to receive for them.  Your Reserved Instance
+        listings then become available for purchase.
+
+        :type reserved_instances_id: string
+        :param reserved_instances_id: The ID of the Reserved Instance that
+            will be listed.
+
+        :type instance_count: int
+        :param instance_count: The number of instances that are a part of a
+            Reserved Instance account that will be listed in the Reserved
+            Instance Marketplace.  This number should be less than or equal to
+            the instance count associated with the Reserved Instance ID
+            specified in this call.
+
+        :type price_schedules: List of tuples
+        :param price_schedules: A list specifying the price of the Reserved
+            Instance for each month remaining in the Reserved Instance term.
+            Each tuple contains two elements, the price and the term.  For
+            example, for an instance that has 11 months remaining in its term,
+            we can have a price schedule with an upfront price of $2.50.
+            At 8 months remaining we can drop the price down to $2.00.
+            This would be expressed as::
+
+                price_schedules=[('2.50', 11), ('2.00', 8)]
+
+        :type client_token: string
+        :param client_token: Unique, case-sensitive identifier you provide
+            to ensure idempotency of the request.  Maximum 64 ASCII characters.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+        """
+        params = {
+            'ReservedInstancesId': reserved_instances_id,
+            'InstanceCount': str(instance_count),
+            'ClientToken': client_token,
+        }
+        for i, schedule in enumerate(price_schedules):
+            price, term = schedule
+            params['PriceSchedules.%s.Price' % i] = str(price)
+            params['PriceSchedules.%s.Term' % i] = str(term)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CreateReservedInstancesListing',
+                             params, [('item', ReservedInstanceListing)], verb='POST')
+
+    def cancel_reserved_instances_listing(self,
+                                          reserved_instances_listing_ids=None,
+                                          dry_run=False):
+        """Cancels the specified Reserved Instance listing.
+
+        :type reserved_instances_listing_ids: List of strings
+        :param reserved_instances_listing_ids: The ID of the
+            Reserved Instance listing to be cancelled.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of
+            :class:`boto.ec2.reservedinstance.ReservedInstanceListing`
+
+        """
+        params = {}
+        if reserved_instances_listing_ids is not None:
+            self.build_list_params(params, reserved_instances_listing_ids,
+                                   'ReservedInstancesListingId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('CancelReservedInstancesListing',
+                             params, [('item', ReservedInstanceListing)], verb='POST')
+
+    def build_configurations_param_list(self, params, target_configurations):
+        for offset, tc in enumerate(target_configurations):
+            prefix = 'ReservedInstancesConfigurationSetItemType.%d.' % offset
+            if tc.availability_zone is not None:
+                params[prefix + 'AvailabilityZone'] = tc.availability_zone
+            if tc.platform is not None:
+                params[prefix + 'Platform'] = tc.platform
+            if tc.instance_count is not None:
+                params[prefix + 'InstanceCount'] = tc.instance_count
+            if tc.instance_type is not None:
+                params[prefix + 'InstanceType'] = tc.instance_type
+
+    def modify_reserved_instances(self, client_token, reserved_instance_ids,
+                                  target_configurations):
+        """
+        Modifies the specified Reserved Instances.
+
+        :type client_token: string
+        :param client_token: A unique, case-sensitive, token you provide to
+            ensure idempotency of your modification request.
+
+        :type reserved_instance_ids: List of strings
+        :param reserved_instance_ids: The IDs of the Reserved Instances to
+            modify.
+
+        :type target_configurations: List of :class:`boto.ec2.reservedinstance.ReservedInstancesConfiguration`
+        :param target_configurations: The configuration settings for the
+            modified Reserved Instances.
+
+        :rtype: string
+        :return: The unique ID for the submitted modification request.
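+
+        A hedged sketch, assuming a connection ``conn``, an illustrative
+        Reserved Instance ID, and a caller-generated idempotency token::
+
+            from boto.ec2.reservedinstance import ReservedInstancesConfiguration
+
+            config = ReservedInstancesConfiguration(
+                availability_zone='us-east-1b', instance_count=1)
+            request_id = conn.modify_reserved_instances(
+                'my-unique-token', ['ri-12345678'], [config])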
+        """
+        params = {}
+        if client_token is not None:
+            params['ClientToken'] = client_token
+        if reserved_instance_ids is not None:
+            self.build_list_params(params, reserved_instance_ids,
+                                   'ReservedInstancesId')
+        if target_configurations is not None:
+            self.build_configurations_param_list(params, target_configurations)
+        mrir = self.get_object(
+            'ModifyReservedInstances',
+            params,
+            ModifyReservedInstancesResult,
+            verb='POST'
+        )
+        return mrir.modification_id
+
+    def describe_reserved_instances_modifications(self,
+            reserved_instances_modification_ids=None, next_token=None,
+            filters=None):
+        """
+        A request to describe the modifications made to Reserved Instances in
+        your account.
+
+        :type reserved_instances_modification_ids: list
+        :param reserved_instances_modification_ids: An optional list of
+            Reserved Instances modification IDs to describe.
+
+        :type next_token: str
+        :param next_token: A string specifying the next paginated set
+            of results to return.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit the
+            results returned.  Filters are provided in the form of a
+            dictionary consisting of filter names as the key and
+            filter values as the value.  The set of allowable filter
+            names/values is dependent on the request being performed.
+            Check the EC2 API guide for details.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
+        """
+        params = {}
+        if reserved_instances_modification_ids:
+            self.build_list_params(params, reserved_instances_modification_ids,
+                                   'ReservedInstancesModificationId')
+        if next_token:
+            params['NextToken'] = next_token
+        if filters:
+            self.build_filter_params(params, filters)
+        return self.get_list('DescribeReservedInstancesModifications',
+                             params, [('item', ReservedInstancesModification)],
+                             verb='POST')
+
+    #
+    # Monitoring
+    #
+
+    def monitor_instances(self, instance_ids, dry_run=False):
+        """
+        Enable detailed CloudWatch monitoring for the supplied instances.
+
+        :type instance_ids: list of strings
+        :param instance_ids: The instance ids
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
+        """
+        params = {}
+        self.build_list_params(params, instance_ids, 'InstanceId')
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('MonitorInstances', params,
+                             [('item', InstanceInfo)], verb='POST')
+
+    def monitor_instance(self, instance_id, dry_run=False):
+        """
+        Deprecated Version, maintained for backward compatibility.
+        Enable detailed CloudWatch monitoring for the supplied instance.
+
+        :type instance_id: string
+        :param instance_id: The instance id
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
+        """
+        return self.monitor_instances([instance_id], dry_run=dry_run)
+
+    def unmonitor_instances(self, instance_ids, dry_run=False):
+        """
+        Disable CloudWatch monitoring for the supplied instances.
+
+        :type instance_ids: list of strings
+        :param instance_ids: The instance ids
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+ + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + params = {} + self.build_list_params(params, instance_ids, 'InstanceId') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('UnmonitorInstances', params, + [('item', InstanceInfo)], verb='POST') + + def unmonitor_instance(self, instance_id, dry_run=False): + """ + Deprecated Version, maintained for backward compatibility. + Disable detailed CloudWatch monitoring for the supplied instance. + + :type instance_id: string + :param instance_id: The instance id + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + return self.unmonitor_instances([instance_id], dry_run=dry_run) + + # + # Bundle Windows Instances + # + + def bundle_instance(self, instance_id, + s3_bucket, + s3_prefix, + s3_upload_policy, dry_run=False): + """ + Bundle Windows instance. + + :type instance_id: string + :param instance_id: The instance id + + :type s3_bucket: string + :param s3_bucket: The bucket in which the AMI should be stored. + + :type s3_prefix: string + :param s3_prefix: The beginning of the file name for the AMI. + + :type s3_upload_policy: string + :param s3_upload_policy: Base64 encoded policy that specifies condition + and permissions for Amazon EC2 to upload the + user's image into Amazon S3. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + + params = {'InstanceId': instance_id, + 'Storage.S3.Bucket': s3_bucket, + 'Storage.S3.Prefix': s3_prefix, + 'Storage.S3.UploadPolicy': s3_upload_policy} + s3auth = boto.auth.get_auth_handler(None, boto.config, + self.provider, ['s3']) + params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id + signature = s3auth.sign_string(s3_upload_policy) + params['Storage.S3.UploadPolicySignature'] = signature + if dry_run: + params['DryRun'] = 'true' + return self.get_object('BundleInstance', params, + BundleInstanceTask, verb='POST') + + def get_all_bundle_tasks(self, bundle_ids=None, filters=None, + dry_run=False): + """ + Retrieve current bundling tasks. If no bundle id is specified, all + tasks are retrieved. + + :type bundle_ids: list + :param bundle_ids: A list of strings containing identifiers for + previously created bundling tasks. + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + if bundle_ids: + self.build_list_params(params, bundle_ids, 'BundleId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeBundleTasks', params, + [('item', BundleInstanceTask)], verb='POST') + + def cancel_bundle_task(self, bundle_id, dry_run=False): + """ + Cancel a previously submitted bundle task + + :type bundle_id: string + :param bundle_id: The identifier of the bundle task to cancel. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
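+
+        A hedged sketch, assuming a connection ``conn`` and a previously
+        started bundle task (``upload_policy`` stands in for a base64
+        encoded S3 upload policy document)::
+
+            task = conn.bundle_instance('i-12345678', 'my-bucket',
+                                        'win-ami-', upload_policy)
+            conn.cancel_bundle_task(task.id)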
+
+        """
+        params = {'BundleId': bundle_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CancelBundleTask', params,
+                               BundleInstanceTask, verb='POST')
+
+    def get_password_data(self, instance_id, dry_run=False):
+        """
+        Get encrypted administrator password for a Windows instance.
+
+        :type instance_id: string
+        :param instance_id: The identifier of the instance to retrieve the
+            password for.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'InstanceId': instance_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST')
+        return rs.passwordData
+
+    #
+    # Cluster Placement Groups
+    #
+
+    def get_all_placement_groups(self, groupnames=None, filters=None,
+                                 dry_run=False):
+        """
+        Get all placement groups associated with your account in a region.
+
+        :type groupnames: list
+        :param groupnames: A list of the names of placement groups to retrieve.
+            If not provided, all placement groups will be
+            returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+            the results returned. Filters are provided
+            in the form of a dictionary consisting of
+            filter names as the key and filter values
+            as the value. The set of allowable filter
+            names/values is dependent on the request
+            being performed. Check the EC2 API guide
+            for details.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.placementgroup.PlacementGroup`
+        """
+        params = {}
+        if groupnames:
+            self.build_list_params(params, groupnames, 'GroupName')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribePlacementGroups', params,
+                             [('item', PlacementGroup)], verb='POST')
+
+    def create_placement_group(self, name, strategy='cluster', dry_run=False):
+        """
+        Create a new placement group for your account.
+        This will create the placement group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new placement group
+
+        :type strategy: string
+        :param strategy: The placement strategy of the new placement group.
+            Currently, the only acceptable value is "cluster".
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'GroupName': name, 'Strategy': strategy}
+        if dry_run:
+            params['DryRun'] = 'true'
+        group = self.get_status('CreatePlacementGroup', params, verb='POST')
+        return group
+
+    def delete_placement_group(self, name, dry_run=False):
+        """
+        Delete a placement group from your account.
+
+        :type name: string
+        :param name: The name of the placement group to delete
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        """
+        params = {'GroupName': name}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeletePlacementGroup', params, verb='POST')
+
+    # Tag methods
+
+    def build_tag_param_list(self, params, tags):
+        keys = sorted(tags.keys())
+        i = 1
+        for key in keys:
+            value = tags[key]
+            params['Tag.%d.Key' % i] = key
+            if value is not None:
+                params['Tag.%d.Value' % i] = value
+            i += 1
+
+    def get_all_tags(self, filters=None, dry_run=False, max_results=None):
+        """
+        Retrieve all the metadata tags associated with your account.
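
A brief sketch of the placement group lifecycle shown above, assuming an existing connection; the group name is a placeholder.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')  # placeholder region
# 'cluster' is the only strategy this API version accepts, so the
# default can simply be left in place.
conn.create_placement_group('my-hpc-group')
groups = conn.get_all_placement_groups(groupnames=['my-hpc-group'])
print([g.name for g in groups])
conn.delete_placement_group('my-hpc-group')
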
+ + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.tag.Tag` objects + """ + params = {} + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + return self.get_list('DescribeTags', params, + [('item', Tag)], verb='POST') + + def create_tags(self, resource_ids, tags, dry_run=False): + """ + Create new metadata tags for the specified resource ids. + + :type resource_ids: list + :param resource_ids: List of strings + + :type tags: dict + :param tags: A dictionary containing the name/value pairs. + If you want to create only a tag name, the + value for that tag should be the empty string + (e.g. ''). + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + self.build_list_params(params, resource_ids, 'ResourceId') + self.build_tag_param_list(params, tags) + if dry_run: + params['DryRun'] = 'true' + return self.get_status('CreateTags', params, verb='POST') + + def delete_tags(self, resource_ids, tags, dry_run=False): + """ + Delete metadata tags for the specified resource ids. + + :type resource_ids: list + :param resource_ids: List of strings + + :type tags: dict or list + :param tags: Either a dictionary containing name/value pairs + or a list containing just tag names. + If you pass in a dictionary, the values must + match the actual tag values or the tag will + not be deleted. If you pass in a value of None + for the tag value, all tags with that name will + be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + if isinstance(tags, list): + tags = {}.fromkeys(tags, None) + params = {} + self.build_list_params(params, resource_ids, 'ResourceId') + self.build_tag_param_list(params, tags) + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteTags', params, verb='POST') + + # Network Interface methods + + def get_all_network_interfaces(self, network_interface_ids=None, filters=None, dry_run=False): + """ + Retrieve all of the Elastic Network Interfaces (ENI's) + associated with your account. + + :type network_interface_ids: list + :param network_interface_ids: a list of strings representing ENI IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
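
A sketch of the tag methods above, assuming an existing connection; the resource IDs and tag names are placeholders. It illustrates the dict-versus-list semantics of delete_tags described in the docstring.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')  # placeholder region
# Tag two resources at once; an empty string stores a name-only tag.
conn.create_tags(['i-12345678', 'vol-11111111'],
                 {'env': 'prod', 'backup': ''})
# A plain list deletes the named tags regardless of their current
# values (it is converted to {name: None} internally).
conn.delete_tags(['i-12345678'], ['backup'])
for tag in conn.get_all_tags(filters={'resource-id': 'i-12345678'}):
    print(tag.res_id, tag.name, tag.value)
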
+ + :rtype: list + :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface` + """ + params = {} + if network_interface_ids: + self.build_list_params(params, network_interface_ids, 'NetworkInterfaceId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeNetworkInterfaces', params, + [('item', NetworkInterface)], verb='POST') + + def create_network_interface(self, subnet_id, private_ip_address=None, + description=None, groups=None, dry_run=False): + """ + Creates a network interface in the specified subnet. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with the + network interface. + + :type private_ip_address: str + :param private_ip_address: The private IP address of the + network interface. If not supplied, one will be chosen + for you. + + :type description: str + :param description: The description of the network interface. + + :type groups: list + :param groups: Lists the groups for use by the network interface. + This can be either a list of group ID's or a list of + :class:`boto.ec2.securitygroup.SecurityGroup` objects. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: :class:`boto.ec2.networkinterface.NetworkInterface` + :return: The newly created network interface. + """ + params = {'SubnetId': subnet_id} + if private_ip_address: + params['PrivateIpAddress'] = private_ip_address + if description: + params['Description'] = description + if groups: + ids = [] + for group in groups: + if isinstance(group, SecurityGroup): + ids.append(group.id) + else: + ids.append(group) + self.build_list_params(params, ids, 'SecurityGroupId') + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateNetworkInterface', params, + NetworkInterface, verb='POST') + + def attach_network_interface(self, network_interface_id, + instance_id, device_index, dry_run=False): + """ + Attaches a network interface to an instance. + + :type network_interface_id: str + :param network_interface_id: The ID of the network interface to attach. + + :type instance_id: str + :param instance_id: The ID of the instance that will be attached + to the network interface. + + :type device_index: int + :param device_index: The index of the device for the network + interface attachment on the instance. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'NetworkInterfaceId': network_interface_id, + 'InstanceId': instance_id, + 'DeviceIndex': device_index} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('AttachNetworkInterface', params, verb='POST') + + def detach_network_interface(self, attachment_id, force=False, + dry_run=False): + """ + Detaches a network interface from an instance. + + :type attachment_id: str + :param attachment_id: The ID of the attachment. + + :type force: bool + :param force: Set to true to force a detachment. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'AttachmentId': attachment_id} + if force: + params['Force'] = 'true' + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachNetworkInterface', params, verb='POST') + + def delete_network_interface(self, network_interface_id, dry_run=False): + """ + Delete the specified network interface. 
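
A sketch of the ENI lifecycle using the methods above, assuming an existing connection; every ID is a placeholder, and the refresh step assumes the parsed NetworkInterface exposes its attachment ID via an attachment object, as in boto's networkinterface module.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')  # placeholder region
# Create an ENI in a subnet, attach it to an instance as eth1, then
# detach and delete it.
eni = conn.create_network_interface('subnet-12345678',
                                    description='secondary interface',
                                    groups=['sg-12345678'])
conn.attach_network_interface(eni.id, 'i-12345678', device_index=1)
# Refresh the object to pick up the attachment ID assigned by EC2.
eni = conn.get_all_network_interfaces([eni.id])[0]
conn.detach_network_interface(eni.attachment.id, force=True)
conn.delete_network_interface(eni.id)
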
+ + :type network_interface_id: str + :param network_interface_id: The ID of the network interface to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'NetworkInterfaceId': network_interface_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteNetworkInterface', params, verb='POST') + + def get_all_instance_types(self): + """ + Get all instance_types available on this cloud (eucalyptus specific) + + :rtype: list of :class:`boto.ec2.instancetype.InstanceType` + :return: The requested InstanceType objects + """ + params = {} + return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST') + + def copy_image(self, source_region, source_image_id, name=None, + description=None, client_token=None, dry_run=False, + encrypted=None, kms_key_id=None): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + :rtype: :class:`boto.ec2.image.CopyImage` + :return: Object containing the image_id of the copied image. + """ + params = { + 'SourceRegion': source_region, + 'SourceImageId': source_image_id, + } + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if client_token is not None: + params['ClientToken'] = client_token + if encrypted is not None: + params['Encrypted'] = 'true' if encrypted else 'false' + if kms_key_id is not None: + params['KmsKeyId'] = kms_key_id + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CopyImage', params, CopyImage, + verb='POST') + + def describe_account_attributes(self, attribute_names=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + if attribute_names is not None: + self.build_list_params(params, attribute_names, 'AttributeName') + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeAccountAttributes', params, + [('item', AccountAttribute)], verb='POST') + + def describe_vpc_attribute(self, vpc_id, attribute=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = { + 'VpcId': vpc_id + } + if attribute is not None: + params['Attribute'] = attribute + if dry_run: + params['DryRun'] = 'true' + return self.get_object('DescribeVpcAttribute', params, + VPCAttribute, verb='POST') + + def modify_vpc_attribute(self, vpc_id, enable_dns_support=None, + enable_dns_hostnames=None, dry_run=False): + """ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = { + 'VpcId': vpc_id + } + if enable_dns_support is not None: + params['EnableDnsSupport.Value'] = ( + 'true' if enable_dns_support else 'false') + if enable_dns_hostnames is not None: + params['EnableDnsHostnames.Value'] = ( + 'true' if enable_dns_hostnames else 'false') + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVpcAttribute', params, verb='POST') + + def get_all_classic_link_instances(self, instance_ids=None, filters=None, + dry_run=False, max_results=None, + next_token=None): + """ + Get all of your linked EC2-Classic instances. This request only + returns information about EC2-Classic instances linked to + a VPC through ClassicLink + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs. Must be + instances linked to a VPC through ClassicLink. 
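
A sketch of copy_image and the VPC attribute calls above, assuming an existing connection; the AMI and VPC IDs are placeholders, and 'enableDnsHostnames' is the EC2 API attribute name these wrappers pass through.

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')  # placeholder region
# Copy an AMI from another region into the connected one, encrypting
# the destination snapshots.
copy = conn.copy_image('us-west-2', 'ami-12345678',
                       name='my-copied-ami', encrypted=True)
print(copy.image_id)
# Flip a VPC attribute and read it back.
conn.modify_vpc_attribute('vpc-12345678', enable_dns_hostnames=True)
attr = conn.describe_vpc_attribute('vpc-12345678', 'enableDnsHostnames')
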
+ + :type filters: dict + :param filters: Optional filters that can be used to limit the + results returned. Filters are provided in the form of a + dictionary consisting of filter names as the key and + filter values as the value. The set of allowable filter + names/values is dependent on the request being performed. + Check the EC2 API guide for details. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :type max_results: int + :param max_results: The maximum number of paginated instance + items per response. + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Instance` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + if max_results is not None: + params['MaxResults'] = max_results + if next_token: + params['NextToken'] = next_token + return self.get_list('DescribeClassicLinkInstances', params, + [('item', Instance)], verb='POST') diff --git a/ext/boto/ec2/ec2object.py b/ext/boto/ec2/ec2object.py new file mode 100644 index 0000000000..fa50a9fcc7 --- /dev/null +++ b/ext/boto/ec2/ec2object.py @@ -0,0 +1,144 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Object +""" +from boto.ec2.tag import TagSet + + +class EC2Object(object): + + def __init__(self, connection=None): + self.connection = connection + if self.connection and hasattr(self.connection, 'region'): + self.region = connection.region + else: + self.region = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class TaggedEC2Object(EC2Object): + """ + Any EC2 resource that can be tagged should be represented + by a Python object that subclasses this class. This class + has the mechanism in place to handle the tagSet element in + the Describe* responses. If tags are found, it will create + a TagSet object and allow it to parse and collect the tags + into a dict that is stored in the "tags" attribute of the + object. 
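
To make the SAX parsing contract concrete: a subclass implements startElement to optionally hand a subtree to a child handler and endElement to record leaf values. The class below is hypothetical, written only to illustrate that contract.

from boto.ec2.ec2object import EC2Object

class AddressInfo(EC2Object):
    """Hypothetical resource, shown only to illustrate the contract."""

    def __init__(self, connection=None):
        super(AddressInfo, self).__init__(connection)
        self.public_ip = None

    def startElement(self, name, attrs, connection):
        # Returning a child object here hands that object the subtree,
        # as TaggedEC2Object does for 'tagSet'; None keeps parsing flat.
        return None

    def endElement(self, name, value, connection):
        if name == 'publicIp':
            self.public_ip = value
        else:
            setattr(self, name, value)
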
+ """ + + def __init__(self, connection=None): + super(TaggedEC2Object, self).__init__(connection) + self.tags = TagSet() + + def startElement(self, name, attrs, connection): + if name == 'tagSet': + return self.tags + else: + return None + + def add_tag(self, key, value='', dry_run=False): + """ + Add a tag to this object. Tags are stored by AWS and can be used + to organize and filter resources. Adding a tag involves a round-trip + to the EC2 service. + + :type key: str + :param key: The key or name of the tag being stored. + + :type value: str + :param value: An optional value that can be stored with the tag. + If you want only the tag name and no value, the + value should be the empty string. + """ + self.add_tags({key: value}, dry_run) + + def add_tags(self, tags, dry_run=False): + """ + Add tags to this object. Tags are stored by AWS and can be used + to organize and filter resources. Adding tags involves a round-trip + to the EC2 service. + + :type tags: dict + :param tags: A dictionary of key-value pairs for the tags being stored. + If for some tags you want only the name and no value, the + corresponding value for that tag name should be an empty + string. + """ + status = self.connection.create_tags( + [self.id], + tags, + dry_run=dry_run + ) + if self.tags is None: + self.tags = TagSet() + self.tags.update(tags) + + def remove_tag(self, key, value=None, dry_run=False): + """ + Remove a tag from this object. Removing a tag involves a round-trip + to the EC2 service. + + :type key: str + :param key: The key or name of the tag being stored. + + :type value: str + :param value: An optional value that can be stored with the tag. + If a value is provided, it must match the value currently + stored in EC2. If not, the tag will not be removed. If + a value of None is provided, the tag will be + unconditionally deleted. + NOTE: There is an important distinction between a value + of '' and a value of None. + """ + self.remove_tags({key: value}, dry_run) + + def remove_tags(self, tags, dry_run=False): + """ + Removes tags from this object. Removing tags involves a round-trip + to the EC2 service. + + :type tags: dict + :param tags: A dictionary of key-value pairs for the tags being removed. + For each key, the provided value must match the value + currently stored in EC2. If not, that particular tag will + not be removed. However, if a value of None is provided, + the tag will be unconditionally deleted. + NOTE: There is an important distinction between a value of + '' and a value of None. + """ + status = self.connection.delete_tags( + [self.id], + tags, + dry_run=dry_run + ) + for key, value in tags.items(): + if key in self.tags: + if value is None or value == self.tags[key]: + del self.tags[key] diff --git a/ext/boto/ec2/elb/__init__.py b/ext/boto/ec2/elb/__init__.py new file mode 100644 index 0000000000..cc0daa5358 --- /dev/null +++ b/ext/boto/ec2/elb/__init__.py @@ -0,0 +1,757 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +load balancing service from AWS. +""" +from boto.connection import AWSQueryConnection +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones +from boto.ec2.elb.instancestate import InstanceState +from boto.ec2.elb.healthcheck import HealthCheck +from boto.regioninfo import RegionInfo, get_regions, load_regions +from boto.regioninfo import connect +import boto +from boto.compat import six + +RegionData = load_regions().get('elasticloadbalancing', {}) + + +def regions(): + """ + Get all available regions for the ELB service. + + :rtype: list + :return: A list of :class:`boto.RegionInfo` instances + """ + return get_regions('elasticloadbalancing', connection_cls=ELBConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ec2.elb.ELBConnection`. + + :param str region_name: The name of the region to connect to. + + :rtype: :class:`boto.ec2.ELBConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('elasticloadbalancing', region_name, + connection_cls=ELBConnection, **kw_params) + + +class ELBConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01') + DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get( + 'Boto', 'elb_region_endpoint', + 'elasticloadbalancing.us-east-1.amazonaws.com') + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + """ + Init method to create a new connection to EC2 Load Balancing Service. + + .. note:: The region argument is overridden by the region specified in + the boto configuration file. 
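
A short sketch of the module-level helpers above; the region name is a placeholder and credentials are assumed to come from the boto configuration.

import boto.ec2.elb

# Enumerate the regions ELB is available in, then connect to one.
for region in boto.ec2.elb.regions():
    print(region.name)
elb = boto.ec2.elb.connect_to_region('us-west-2')  # placeholder region
balancers = elb.get_all_load_balancers()
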
+ """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(ELBConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def build_list_params(self, params, items, label): + if isinstance(items, six.string_types): + items = [items] + for index, item in enumerate(items): + params[label % (index + 1)] = item + + def get_all_load_balancers(self, load_balancer_names=None, marker=None): + """ + Retrieve all load balancers associated with your account. + + :type load_balancer_names: list + :keyword load_balancer_names: An optional list of load balancer names. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :rtype: :py:class:`boto.resultset.ResultSet` + :return: A ResultSet containing instances of + :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + """ + params = {} + if load_balancer_names: + self.build_list_params(params, load_balancer_names, + 'LoadBalancerNames.member.%d') + + if marker: + params['Marker'] = marker + + return self.get_list('DescribeLoadBalancers', params, + [('member', LoadBalancer)]) + + def create_load_balancer(self, name, zones, listeners=None, subnets=None, + security_groups=None, scheme='internet-facing', + complex_listeners=None): + """ + Create a new load balancer for your account. By default the load + balancer will be created in EC2. To create a load balancer inside a + VPC, parameter zones must be set to None and subnets must not be None. + The load balancer will be automatically created under the VPC that + contains the subnet(s) specified. + + :type name: string + :param name: The mnemonic name associated with the new load balancer + + :type zones: List of strings + :param zones: The names of the availability zone(s) to add. + + :type listeners: List of tuples + :param listeners: Each tuple contains three or four values, + (LoadBalancerPortNumber, InstancePortNumber, Protocol, + [SSLCertificateId]) where LoadBalancerPortNumber and + InstancePortNumber are integer values between 1 and 65535, + Protocol is a string containing either 'TCP', 'SSL', HTTP', or + 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM + certificate, and must be specified when doing HTTPS. + + :type subnets: list of strings + :param subnets: A list of subnet IDs in your VPC to attach to + your LoadBalancer. + + :type security_groups: list of strings + :param security_groups: The security groups assigned to your + LoadBalancer within your VPC. + + :type scheme: string + :param scheme: The type of a LoadBalancer. By default, Elastic + Load Balancing creates an internet-facing LoadBalancer with + a publicly resolvable DNS name, which resolves to public IP + addresses. + + Specify the value internal for this option to create an + internal LoadBalancer with a DNS name that resolves to + private IP addresses. + + This option is only available for LoadBalancers attached + to an Amazon VPC. 
+ + :type complex_listeners: List of tuples + :param complex_listeners: Each tuple contains four or five values, + (LoadBalancerPortNumber, InstancePortNumber, Protocol, + InstanceProtocol, SSLCertificateId). + + Where: + - LoadBalancerPortNumber and InstancePortNumber are integer + values between 1 and 65535 + - Protocol and InstanceProtocol is a string containing + either 'TCP', + 'SSL', 'HTTP', or 'HTTPS' + - SSLCertificateId is the ARN of an SSL certificate loaded into + AWS IAM + + :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + :return: The newly created + :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + """ + if not listeners and not complex_listeners: + # Must specify one of the two options + return None + + params = {'LoadBalancerName': name, + 'Scheme': scheme} + + # Handle legacy listeners + if listeners: + for index, listener in enumerate(listeners): + i = index + 1 + protocol = listener[2].upper() + params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] + params['Listeners.member.%d.InstancePort' % i] = listener[1] + params['Listeners.member.%d.Protocol' % i] = listener[2] + if protocol == 'HTTPS' or protocol == 'SSL': + params['Listeners.member.%d.SSLCertificateId' % i] = listener[3] + + # Handle the full listeners + if complex_listeners: + for index, listener in enumerate(complex_listeners): + i = index + 1 + protocol = listener[2].upper() + InstanceProtocol = listener[3].upper() + params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] + params['Listeners.member.%d.InstancePort' % i] = listener[1] + params['Listeners.member.%d.Protocol' % i] = listener[2] + params['Listeners.member.%d.InstanceProtocol' % i] = listener[3] + if protocol == 'HTTPS' or protocol == 'SSL': + params['Listeners.member.%d.SSLCertificateId' % i] = listener[4] + + if zones: + self.build_list_params(params, zones, 'AvailabilityZones.member.%d') + + if subnets: + self.build_list_params(params, subnets, 'Subnets.member.%d') + + if security_groups: + self.build_list_params(params, security_groups, + 'SecurityGroups.member.%d') + + load_balancer = self.get_object('CreateLoadBalancer', + params, LoadBalancer) + load_balancer.name = name + load_balancer.listeners = listeners + load_balancer.availability_zones = zones + load_balancer.subnets = subnets + load_balancer.security_groups = security_groups + return load_balancer + + def create_load_balancer_listeners(self, name, listeners=None, + complex_listeners=None): + """ + Creates a Listener (or group of listeners) for an existing + Load Balancer + + :type name: string + :param name: The name of the load balancer to create the listeners for + + :type listeners: List of tuples + :param listeners: Each tuple contains three or four values, + (LoadBalancerPortNumber, InstancePortNumber, Protocol, + [SSLCertificateId]) where LoadBalancerPortNumber and + InstancePortNumber are integer values between 1 and 65535, + Protocol is a string containing either 'TCP', 'SSL', HTTP', or + 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM + certificate, and must be specified when doing HTTPS. + + :type complex_listeners: List of tuples + :param complex_listeners: Each tuple contains four or five values, + (LoadBalancerPortNumber, InstancePortNumber, Protocol, + InstanceProtocol, SSLCertificateId). 
+
+            Where:
+                - LoadBalancerPortNumber and InstancePortNumber are integer
+                  values between 1 and 65535
+                - Protocol and InstanceProtocol are strings containing
+                  either 'TCP',
+                  'SSL', 'HTTP', or 'HTTPS'
+                - SSLCertificateId is the ARN of an SSL certificate loaded into
+                  AWS IAM
+
+        :return: The status of the request
+        """
+        if not listeners and not complex_listeners:
+            # Must specify one of the two options
+            return None
+
+        params = {'LoadBalancerName': name}
+
+        # Handle the simple listeners
+        if listeners:
+            for index, listener in enumerate(listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
+
+        # Handle the full listeners
+        if complex_listeners:
+            for index, listener in enumerate(complex_listeners):
+                i = index + 1
+                protocol = listener[2].upper()
+                InstanceProtocol = listener[3].upper()
+                params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+                params['Listeners.member.%d.InstancePort' % i] = listener[1]
+                params['Listeners.member.%d.Protocol' % i] = listener[2]
+                params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
+                if protocol == 'HTTPS' or protocol == 'SSL':
+                    params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
+
+        return self.get_status('CreateLoadBalancerListeners', params)
+
+    def delete_load_balancer(self, name):
+        """
+        Delete a Load Balancer from your account.
+
+        :type name: string
+        :param name: The name of the Load Balancer to delete
+        """
+        params = {'LoadBalancerName': name}
+        return self.get_status('DeleteLoadBalancer', params)
+
+    def delete_load_balancer_listeners(self, name, ports):
+        """
+        Deletes a load balancer listener (or group of listeners)
+
+        :type name: string
+        :param name: The name of the load balancer to delete the listeners from
+
+        :type ports: List int
+        :param ports: Each int represents the port on the ELB to be removed
+
+        :return: The status of the request
+        """
+        params = {'LoadBalancerName': name}
+        for index, port in enumerate(ports):
+            params['LoadBalancerPorts.member.%d' % (index + 1)] = port
+        return self.get_status('DeleteLoadBalancerListeners', params)
+
+    def enable_availability_zones(self, load_balancer_name, zones_to_add):
+        """
+        Add availability zones to an existing Load Balancer.
+        All zones must be in the same region as the Load Balancer.
+        Adding zones that are already registered with the Load Balancer
+        has no effect.
+
+        :type load_balancer_name: string
+        :param load_balancer_name: The name of the Load Balancer
+
+        :type zones: List of strings
+        :param zones: The name of the zone(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of zones for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': load_balancer_name}
+        self.build_list_params(params, zones_to_add,
+                               'AvailabilityZones.member.%d')
+        obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
+                              params, LoadBalancerZones)
+        return obj.zones
+
+    def disable_availability_zones(self, load_balancer_name, zones_to_remove):
+        """
+        Remove availability zones from an existing Load Balancer.
+        All zones must be in the same region as the Load Balancer.
+        Removing zones that are not registered with the Load Balancer
+        has no effect.
+        You cannot remove all zones from a Load Balancer.
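
A sketch tying together the creation and listener methods above; the balancer name, zones, ports, and certificate ARN are placeholders.

import boto.ec2.elb

elb = boto.ec2.elb.connect_to_region('us-east-1')  # placeholder region
# Classic EC2 balancer: zones are set, subnets stay None.
lb = elb.create_load_balancer('my-lb', ['us-east-1a'],
                              listeners=[(80, 8080, 'HTTP')])
# Add an HTTPS listener later; the certificate ARN is a placeholder.
elb.create_load_balancer_listeners(
    'my-lb',
    [(443, 8443, 'HTTPS',
      'arn:aws:iam::123456789012:server-certificate/my-cert')])
zones = elb.enable_availability_zones('my-lb', ['us-east-1b'])
elb.delete_load_balancer_listeners('my-lb', [443])
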
+ + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type zones: List of strings + :param zones: The name of the zone(s) to remove. + + :rtype: List of strings + :return: An updated list of zones for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, zones_to_remove, + 'AvailabilityZones.member.%d') + obj = self.get_object('DisableAvailabilityZonesForLoadBalancer', + params, LoadBalancerZones) + return obj.zones + + def modify_lb_attribute(self, load_balancer_name, attribute, value): + """Changes an attribute of a Load Balancer + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type attribute: string + :param attribute: The attribute you wish to change. + + * crossZoneLoadBalancing - Boolean (true) + * connectingSettings - :py:class:`ConnectionSettingAttribute` instance + * accessLog - :py:class:`AccessLogAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + """ + + bool_reqs = ('crosszoneloadbalancing',) + if attribute.lower() in bool_reqs: + if isinstance(value, bool): + if value: + value = 'true' + else: + value = 'false' + + params = {'LoadBalancerName': load_balancer_name} + if attribute.lower() == 'crosszoneloadbalancing': + params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled' + ] = value + elif attribute.lower() == 'accesslog': + params['LoadBalancerAttributes.AccessLog.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \ + value.s3_bucket_name + params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \ + value.s3_bucket_prefix + params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ + value.emit_interval + elif attribute.lower() == 'connectiondraining': + params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \ + value.enabled and 'true' or 'false' + params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \ + value.timeout + elif attribute.lower() == 'connectingsettings': + params['LoadBalancerAttributes.ConnectionSettings.IdleTimeout'] = \ + value.idle_timeout + else: + raise ValueError('InvalidAttribute', attribute) + return self.get_status('ModifyLoadBalancerAttributes', params, + verb='GET') + + def get_all_lb_attributes(self, load_balancer_name): + """Gets all Attributes of a Load Balancer + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :rtype: boto.ec2.elb.attribute.LbAttributes + :return: The attribute object of the ELB. + """ + from boto.ec2.elb.attributes import LbAttributes + params = {'LoadBalancerName': load_balancer_name} + return self.get_object('DescribeLoadBalancerAttributes', + params, LbAttributes) + + def get_lb_attribute(self, load_balancer_name, attribute): + """Gets an attribute of a Load Balancer + + This will make an EC2 call for each method call. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type attribute: string + :param attribute: The attribute you wish to see. 
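
A sketch of the attribute methods above, assuming an ELB connection; the balancer name is a placeholder. Boolean attributes take a plain bool, while structured ones take the attribute objects defined later in this patch (ext/boto/ec2/elb/attributes.py).

import boto.ec2.elb
from boto.ec2.elb.attributes import ConnectionDrainingAttribute

elb = boto.ec2.elb.connect_to_region('us-east-1')  # placeholder region
# Boolean attributes accept a plain bool ...
elb.modify_lb_attribute('my-lb', 'crossZoneLoadBalancing', True)
# ... while structured attributes take the matching attribute object.
draining = ConnectionDrainingAttribute()
draining.enabled = True
draining.timeout = 300
elb.modify_lb_attribute('my-lb', 'connectionDraining', draining)
print(elb.get_lb_attribute('my-lb', 'crossZoneLoadBalancing'))
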
+ + * accessLog - :py:class:`AccessLogAttribute` instance + * crossZoneLoadBalancing - Boolean + * connectingSettings - :py:class:`ConnectionSettingAttribute` instance + * connectionDraining - :py:class:`ConnectionDrainingAttribute` + instance + + :rtype: Attribute dependent + :return: The new value for the attribute + """ + attributes = self.get_all_lb_attributes(load_balancer_name) + if attribute.lower() == 'accesslog': + return attributes.access_log + if attribute.lower() == 'crosszoneloadbalancing': + return attributes.cross_zone_load_balancing.enabled + if attribute.lower() == 'connectiondraining': + return attributes.connection_draining + if attribute.lower() == 'connectingsettings': + return attributes.connecting_settings + return None + + def register_instances(self, load_balancer_name, instances): + """ + Add new Instances to an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to add. + + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('RegisterInstancesWithLoadBalancer', + params, [('member', InstanceInfo)]) + + def deregister_instances(self, load_balancer_name, instances): + """ + Remove Instances from an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to remove. + + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('DeregisterInstancesFromLoadBalancer', + params, [('member', InstanceInfo)]) + + def describe_instance_health(self, load_balancer_name, instances=None): + """ + Get current state of all Instances registered to an Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances + to return status for. If not provided, + the state of all instances will be returned. + + :rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState` + :return: list of state info for instances in this Load Balancer. + + """ + params = {'LoadBalancerName': load_balancer_name} + if instances: + self.build_list_params(params, instances, + 'Instances.member.%d.InstanceId') + return self.get_list('DescribeInstanceHealth', params, + [('member', InstanceState)]) + + def configure_health_check(self, name, health_check): + """ + Define a health check for the EndPoints. + + :type name: string + :param name: The mnemonic name associated with the load balancer + + :type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck` + :param health_check: A HealthCheck object populated with the desired + values. 
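
A sketch of instance registration and health reporting with the methods above; the balancer name and instance IDs are placeholders.

import boto.ec2.elb

elb = boto.ec2.elb.connect_to_region('us-east-1')  # placeholder region
elb.register_instances('my-lb', ['i-12345678', 'i-87654321'])
# Reports InService/OutOfService per instance, with a reason code
# when an instance is out of service.
for state in elb.describe_instance_health('my-lb'):
    print(state.instance_id, state.state, state.reason_code)
elb.deregister_instances('my-lb', ['i-12345678'])
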
+
+        :rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
+        :return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
+        """
+        params = {'LoadBalancerName': name,
+                  'HealthCheck.Timeout': health_check.timeout,
+                  'HealthCheck.Target': health_check.target,
+                  'HealthCheck.Interval': health_check.interval,
+                  'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
+                  'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
+        return self.get_object('ConfigureHealthCheck', params, HealthCheck)
+
+    def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
+                                        ssl_certificate_id):
+        """
+        Sets the certificate that terminates the specified listener's SSL
+        connections. The specified certificate replaces any prior certificate
+        that was used on the same LoadBalancer and port.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'LoadBalancerPort': lb_port,
+                  'SSLCertificateId': ssl_certificate_id}
+        return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
+
+    def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
+        """
+        Generates a stickiness policy with sticky session lifetimes that follow
+        that of an application-generated cookie. This policy can only be
+        associated with HTTP listeners.
+
+        This policy is similar to the policy created by
+        CreateLBCookieStickinessPolicy, except that the lifetime of the special
+        Elastic Load Balancing cookie follows the lifetime of the
+        application-generated cookie specified in the policy configuration. The
+        load balancer only inserts a new stickiness cookie when the application
+        response includes a new application cookie.
+
+        If the application cookie is explicitly removed or expires, the session
+        stops being sticky until a new application cookie is issued.
+        """
+        params = {'CookieName': name,
+                  'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        return self.get_status('CreateAppCookieStickinessPolicy', params)
+
+    def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
+                                           lb_name, policy_name):
+        """
+        Generates a stickiness policy with sticky session lifetimes controlled
+        by the lifetime of the browser (user-agent) or a specified expiration
+        period. This policy can be associated only with HTTP listeners.
+
+        When a load balancer implements this policy, the load balancer uses a
+        special cookie to track the backend server instance for each request.
+        When the load balancer receives a request, it first checks to see if
+        this cookie is present in the request. If so, the load balancer sends
+        the request to the application server specified in the cookie. If not,
+        the load balancer sends the request to a server that is chosen based on
+        the existing load balancing algorithm.
+
+        A cookie is inserted into the response for binding subsequent requests
+        from the same user to that server. The validity of the cookie is based
+        on the cookie expiration time, which is specified in the policy
+        configuration.
+
+        None may be passed for cookie_expiration_period.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        if cookie_expiration_period is not None:
+            params['CookieExpirationPeriod'] = cookie_expiration_period
+        return self.get_status('CreateLBCookieStickinessPolicy', params)
+
+    def create_lb_policy(self, lb_name, policy_name, policy_type,
+                         policy_attributes):
+        """
+        Creates a new policy that contains the necessary attributes
+        depending on the policy type.
+        Policies are settings that are saved for your load balancer
+        and that can be applied to the front-end listener, or the
+        back-end application server.
+
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name,
+                  'PolicyTypeName': policy_type}
+        if policy_attributes is not None:
+            for index, (name, value) in enumerate(six.iteritems(policy_attributes), 1):
+                params['PolicyAttributes.member.%d.AttributeName' % index] = name
+                params['PolicyAttributes.member.%d.AttributeValue' % index] = value
+        else:
+            params['PolicyAttributes'] = ''
+        return self.get_status('CreateLoadBalancerPolicy', params)
+
+    def delete_lb_policy(self, lb_name, policy_name):
+        """
+        Deletes a policy from the LoadBalancer. The specified policy must not
+        be enabled for any listeners.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'PolicyName': policy_name}
+        return self.get_status('DeleteLoadBalancerPolicy', params)
+
+    def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
+        """
+        Associates, updates, or disables a policy with a listener on the load
+        balancer. Currently only zero (0) or one (1) policy can be associated
+        with a listener.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'LoadBalancerPort': lb_port}
+        if len(policies):
+            self.build_list_params(params, policies, 'PolicyNames.member.%d')
+        else:
+            params['PolicyNames'] = ''
+        return self.get_status('SetLoadBalancerPoliciesOfListener', params)
+
+    def set_lb_policies_of_backend_server(self, lb_name, instance_port,
+                                          policies):
+        """
+        Replaces the current set of policies associated with a port on which
+        the back-end server is listening with a new set of policies.
+        """
+        params = {'LoadBalancerName': lb_name,
+                  'InstancePort': instance_port}
+        if policies:
+            self.build_list_params(params, policies, 'PolicyNames.member.%d')
+        else:
+            params['PolicyNames'] = ''
+        return self.get_status('SetLoadBalancerPoliciesForBackendServer',
+                               params)
+
+    def apply_security_groups_to_lb(self, name, security_groups):
+        """
+        Associates one or more security groups with the load balancer.
+        The provided security groups will override any currently applied
+        security groups.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type security_groups: List of strings
+        :param security_groups: The name of the security group(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of security groups for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': name}
+        self.build_list_params(params, security_groups,
+                               'SecurityGroups.member.%d')
+        return self.get_list('ApplySecurityGroupsToLoadBalancer',
+                             params, None)
+
+    def attach_lb_to_subnets(self, name, subnets):
+        """
+        Attaches load balancer to one or more subnets.
+        Attaching subnets that are already registered with the
+        Load Balancer has no effect.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type subnets: List of strings
+        :param subnets: The name of the subnet(s) to add.
+
+        :rtype: List of strings
+        :return: An updated list of subnets for this Load Balancer.
+
+        """
+        params = {'LoadBalancerName': name}
+        self.build_list_params(params, subnets,
+                               'Subnets.member.%d')
+        return self.get_list('AttachLoadBalancerToSubnets',
+                             params, None)
+
+    def detach_lb_from_subnets(self, name, subnets):
+        """
+        Detaches load balancer from one or more subnets.
+
+        :type name: string
+        :param name: The name of the Load Balancer
+
+        :type subnets: List of strings
+        :param subnets: The name of the subnet(s) to detach.
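
A sketch of the stickiness policy methods above; the balancer name, policy name, and port are placeholders.

import boto.ec2.elb

elb = boto.ec2.elb.connect_to_region('us-east-1')  # placeholder region
# One-hour ELB-generated cookie, bound to the port-80 listener.
elb.create_lb_cookie_stickiness_policy(3600, 'my-lb', 'sticky-1h')
elb.set_lb_policies_of_listener('my-lb', 80, ['sticky-1h'])
# An empty policy list disables stickiness on that listener again.
elb.set_lb_policies_of_listener('my-lb', 80, [])
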
+ + :rtype: List of strings + :return: An updated list of subnets for this Load Balancer. + + """ + params = {'LoadBalancerName': name} + self.build_list_params(params, subnets, + 'Subnets.member.%d') + return self.get_list('DetachLoadBalancerFromSubnets', + params, None) diff --git a/ext/boto/ec2/elb/attributes.py b/ext/boto/ec2/elb/attributes.py new file mode 100644 index 0000000000..605e5d54a7 --- /dev/null +++ b/ext/boto/ec2/elb/attributes.py @@ -0,0 +1,154 @@ +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# Created by Chris Huegle for TellApart, Inc. + +class ConnectionSettingAttribute(object): + """ + Represents the ConnectionSetting segment of ELB Attributes. + """ + def __init__(self, connection=None): + self.idle_timeout = None + + def __repr__(self): + return 'ConnectionSettingAttribute(%s)' % ( + self.idle_timeout) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'IdleTimeout': + self.idle_timeout = int(value) + +class CrossZoneLoadBalancingAttribute(object): + """ + Represents the CrossZoneLoadBalancing segement of ELB Attributes. + """ + def __init__(self, connection=None): + self.enabled = None + + def __repr__(self): + return 'CrossZoneLoadBalancingAttribute(%s)' % ( + self.enabled) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + + +class AccessLogAttribute(object): + """ + Represents the AccessLog segment of ELB attributes. + """ + def __init__(self, connection=None): + self.enabled = None + self.s3_bucket_name = None + self.s3_bucket_prefix = None + self.emit_interval = None + + def __repr__(self): + return 'AccessLog(%s, %s, %s, %s)' % ( + self.enabled, + self.s3_bucket_name, + self.s3_bucket_prefix, + self.emit_interval + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'S3BucketName': + self.s3_bucket_name = value + elif name == 'S3BucketPrefix': + self.s3_bucket_prefix = value + elif name == 'EmitInterval': + self.emit_interval = int(value) + + +class ConnectionDrainingAttribute(object): + """ + Represents the ConnectionDraining segment of ELB attributes. 
+ """ + def __init__(self, connection=None): + self.enabled = None + self.timeout = None + + def __repr__(self): + return 'ConnectionDraining(%s, %s)' % ( + self.enabled, + self.timeout + ) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'Timeout': + self.timeout = int(value) + + +class LbAttributes(object): + """ + Represents the Attributes of an Elastic Load Balancer. + """ + def __init__(self, connection=None): + self.connection = connection + self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute( + self.connection) + self.access_log = AccessLogAttribute(self.connection) + self.connection_draining = ConnectionDrainingAttribute(self.connection) + self.connecting_settings = ConnectionSettingAttribute(self.connection) + + def __repr__(self): + return 'LbAttributes(%s, %s, %s, %s)' % ( + repr(self.cross_zone_load_balancing), + repr(self.access_log), + repr(self.connection_draining), + repr(self.connecting_settings)) + + def startElement(self, name, attrs, connection): + if name == 'CrossZoneLoadBalancing': + return self.cross_zone_load_balancing + if name == 'AccessLog': + return self.access_log + if name == 'ConnectionDraining': + return self.connection_draining + if name == 'ConnectionSettings': + return self.connecting_settings + + def endElement(self, name, value, connection): + pass diff --git a/ext/boto/ec2/elb/healthcheck.py b/ext/boto/ec2/elb/healthcheck.py new file mode 100644 index 0000000000..040f962300 --- /dev/null +++ b/ext/boto/ec2/elb/healthcheck.py @@ -0,0 +1,89 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class HealthCheck(object): + """ + Represents an EC2 Access Point Health Check. See + :ref:`elb-configuring-a-health-check` for a walkthrough on configuring + load balancer health checks. + """ + def __init__(self, access_point=None, interval=30, target=None, + healthy_threshold=3, timeout=5, unhealthy_threshold=5): + """ + :ivar str access_point: The name of the load balancer this + health check is associated with. + :ivar int interval: Specifies how many seconds there are between + health checks. + :ivar str target: Determines what to check on an instance. See the + Amazon HealthCheck_ documentation for possible Target values. + + .. 
_HealthCheck: http://docs.amazonwebservices.com/ElasticLoadBalancing/latest/APIReference/API_HealthCheck.html + """ + self.access_point = access_point + self.interval = interval + self.target = target + self.healthy_threshold = healthy_threshold + self.timeout = timeout + self.unhealthy_threshold = unhealthy_threshold + + def __repr__(self): + return 'HealthCheck:%s' % self.target + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Interval': + self.interval = int(value) + elif name == 'Target': + self.target = value + elif name == 'HealthyThreshold': + self.healthy_threshold = int(value) + elif name == 'Timeout': + self.timeout = int(value) + elif name == 'UnhealthyThreshold': + self.unhealthy_threshold = int(value) + else: + setattr(self, name, value) + + def update(self): + """ + In the case where you have accessed an existing health check on a + load balancer, this method applies this instance's health check + values to the load balancer it is attached to. + + .. note:: This method will not do anything if the :py:attr:`access_point` + attribute isn't set, as is the case with a newly instantiated + HealthCheck instance. + """ + if not self.access_point: + return + + new_hc = self.connection.configure_health_check(self.access_point, + self) + self.interval = new_hc.interval + self.target = new_hc.target + self.healthy_threshold = new_hc.healthy_threshold + self.unhealthy_threshold = new_hc.unhealthy_threshold + self.timeout = new_hc.timeout diff --git a/ext/boto/ec2/elb/instancestate.py b/ext/boto/ec2/elb/instancestate.py new file mode 100644 index 0000000000..dd61c123ce --- /dev/null +++ b/ext/boto/ec2/elb/instancestate.py @@ -0,0 +1,63 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class InstanceState(object): + """ + Represents the state of an EC2 Load Balancer Instance + """ + + def __init__(self, load_balancer=None, description=None, + state=None, instance_id=None, reason_code=None): + """ + :ivar boto.ec2.elb.loadbalancer.LoadBalancer load_balancer: The + load balancer this instance is registered to. + :ivar str description: A description of the instance. + :ivar str instance_id: The EC2 instance ID. + :ivar str reason_code: Provides information about the cause of + an OutOfService instance. Specifically, it indicates whether the + cause is Elastic Load Balancing or the instance behind the + LoadBalancer. 
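
A sketch combining the HealthCheck class above with configure_health_check from the connection; the balancer name and target URL are placeholders.

import boto.ec2.elb
from boto.ec2.elb.healthcheck import HealthCheck

elb = boto.ec2.elb.connect_to_region('us-east-1')  # placeholder region
hc = HealthCheck(interval=20, target='HTTP:8080/health',
                 healthy_threshold=3, unhealthy_threshold=5, timeout=5)
# The returned object reflects the values the service actually stored.
applied = elb.configure_health_check('my-lb', hc)
print(applied.interval, applied.target)
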
+ :ivar str state: Specifies the current state of the instance. + """ + self.load_balancer = load_balancer + self.description = description + self.state = state + self.instance_id = instance_id + self.reason_code = reason_code + + def __repr__(self): + return 'InstanceState:(%s,%s)' % (self.instance_id, self.state) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Description': + self.description = value + elif name == 'State': + self.state = value + elif name == 'InstanceId': + self.instance_id = value + elif name == 'ReasonCode': + self.reason_code = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/elb/listelement.py b/ext/boto/ec2/elb/listelement.py new file mode 100644 index 0000000000..0fe3a1e8eb --- /dev/null +++ b/ext/boto/ec2/elb/listelement.py @@ -0,0 +1,36 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class ListElement(list): + """ + A :py:class:`list` subclass that has some additional methods + for interacting with Amazon's XML API. + """ + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) diff --git a/ext/boto/ec2/elb/listener.py b/ext/boto/ec2/elb/listener.py new file mode 100644 index 0000000000..ed33b131a6 --- /dev/null +++ b/ext/boto/ec2/elb/listener.py @@ -0,0 +1,87 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.elb.listelement import ListElement + + +class Listener(object): + """ + Represents an EC2 Load Balancer Listener tuple + """ + + def __init__(self, load_balancer=None, load_balancer_port=0, + instance_port=0, protocol='', ssl_certificate_id=None, instance_protocol=None): + self.load_balancer = load_balancer + self.load_balancer_port = load_balancer_port + self.instance_port = instance_port + self.protocol = protocol + self.instance_protocol = instance_protocol + self.ssl_certificate_id = ssl_certificate_id + self.policy_names = ListElement() + + def __repr__(self): + r = "(%d, %d, '%s'" % (self.load_balancer_port, self.instance_port, self.protocol) + if self.instance_protocol: + r += ", '%s'" % self.instance_protocol + if self.ssl_certificate_id: + r += ', %s' % (self.ssl_certificate_id) + r += ')' + return r + + def startElement(self, name, attrs, connection): + if name == 'PolicyNames': + return self.policy_names + return None + + def endElement(self, name, value, connection): + if name == 'LoadBalancerPort': + self.load_balancer_port = int(value) + elif name == 'InstancePort': + self.instance_port = int(value) + elif name == 'InstanceProtocol': + self.instance_protocol = value + elif name == 'Protocol': + self.protocol = value + elif name == 'SSLCertificateId': + self.ssl_certificate_id = value + else: + setattr(self, name, value) + + def get_tuple(self): + return self.load_balancer_port, self.instance_port, self.protocol + + def get_complex_tuple(self): + return self.load_balancer_port, self.instance_port, self.protocol, self.instance_protocol + + def __getitem__(self, key): + if key == 0: + return self.load_balancer_port + if key == 1: + return self.instance_port + if key == 2: + return self.protocol + if key == 3: + return self.instance_protocol + if key == 4: + return self.ssl_certificate_id + raise KeyError diff --git a/ext/boto/ec2/elb/loadbalancer.py b/ext/boto/ec2/elb/loadbalancer.py new file mode 100644 index 0000000000..501544941f --- /dev/null +++ b/ext/boto/ec2/elb/loadbalancer.py @@ -0,0 +1,419 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
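+#
+# Illustrative usage sketch for the classes defined below (the region,
+# balancer name and instance id are placeholders, not part of boto):
+#
+#     import boto.ec2.elb
+#     conn = boto.ec2.elb.connect_to_region('us-east-1')
+#     lb = conn.get_all_load_balancers(load_balancer_names=['my-lb'])[0]
+#     lb.register_instances(['i-12345678'])
+#     for state in lb.get_instance_health():
+#         print('%s %s' % (state.instance_id, state.state))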
+
+from boto.ec2.elb.healthcheck import HealthCheck
+from boto.ec2.elb.listener import Listener
+from boto.ec2.elb.listelement import ListElement
+from boto.ec2.elb.policies import Policies, OtherPolicy
+from boto.ec2.elb.securitygroup import SecurityGroup
+from boto.ec2.instanceinfo import InstanceInfo
+from boto.resultset import ResultSet
+from boto.compat import six
+
+
+class Backend(object):
+    """Backend server description"""
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.instance_port = None
+        self.policies = None
+
+    def __repr__(self):
+        return 'Backend(%r:%r)' % (self.instance_port, self.policies)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'PolicyNames':
+            self.policies = ResultSet([('member', OtherPolicy)])
+            return self.policies
+
+    def endElement(self, name, value, connection):
+        if name == 'InstancePort':
+            self.instance_port = int(value)
+        return
+
+
+class LoadBalancerZones(object):
+    """
+    Used to collect the zones for a Load Balancer when enable_zones
+    or disable_zones are called.
+    """
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.zones = ListElement()
+
+    def startElement(self, name, attrs, connection):
+        if name == 'AvailabilityZones':
+            return self.zones
+
+    def endElement(self, name, value, connection):
+        pass
+
+
+class LoadBalancer(object):
+    """
+    Represents an EC2 Load Balancer.
+    """
+
+    def __init__(self, connection=None, name=None, endpoints=None):
+        """
+        :ivar boto.ec2.elb.ELBConnection connection: The connection this load
+            balancer instance was instantiated from.
+        :ivar list listeners: A list of tuples in the form of
+            ``(<Inbound port>, <Outbound port>, <Protocol>)``
+        :ivar boto.ec2.elb.healthcheck.HealthCheck health_check: The health
+            check policy for this load balancer.
+        :ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
+            other policies.
+        :ivar str name: The name of the Load Balancer.
+        :ivar str dns_name: The external DNS name for the balancer.
+        :ivar str created_time: A date+time string showing when the
+            load balancer was created.
+        :ivar list instances: A list of :py:class:`boto.ec2.instanceinfo.InstanceInfo`
+            instances, representing the EC2 instances this load balancer is
+            distributing requests to.
+        :ivar list availability_zones: The availability zones this balancer
+            covers.
+        :ivar str canonical_hosted_zone_name: Current CNAME for the balancer.
+        :ivar str canonical_hosted_zone_name_id: The Route 53 hosted zone
+            ID of this balancer. Needed when creating an Alias record in a
+            Route 53 hosted zone.
+        :ivar boto.ec2.elb.securitygroup.SecurityGroup source_security_group:
+            The security group that you can use as part of your inbound rules
+            for your load balancer back-end instances to disallow traffic
+            from sources other than your load balancer.
+        :ivar list subnets: A list of subnets this balancer is on.
+        :ivar list security_groups: A list of additional security groups that
+            have been applied.
+        :ivar str vpc_id: The ID of the VPC that this ELB resides within.
+        :ivar list backends: A list of :py:class:`boto.ec2.elb.loadbalancer.Backend`
+            back-end server descriptions.
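+
+        A short usage sketch (assumes ``lb`` is a LoadBalancer obtained
+        from an :py:class:`boto.ec2.elb.ELBConnection`; the target URL is
+        a placeholder)::
+
+            from boto.ec2.elb import HealthCheck
+            hc = HealthCheck(interval=20, target='HTTP:80/health',
+                             healthy_threshold=3, unhealthy_threshold=5)
+            lb.configure_health_check(hc)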
+        """
+        self.connection = connection
+        self.name = name
+        self.listeners = None
+        self.health_check = None
+        self.policies = None
+        self.dns_name = None
+        self.created_time = None
+        self.instances = None
+        self.availability_zones = ListElement()
+        self.canonical_hosted_zone_name = None
+        self.canonical_hosted_zone_name_id = None
+        self.source_security_group = None
+        self.subnets = ListElement()
+        self.security_groups = ListElement()
+        self.vpc_id = None
+        self.scheme = None
+        self.backends = None
+        self._attributes = None
+
+    def __repr__(self):
+        return 'LoadBalancer:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'HealthCheck':
+            self.health_check = HealthCheck(self)
+            return self.health_check
+        elif name == 'ListenerDescriptions':
+            self.listeners = ResultSet([('member', Listener)])
+            return self.listeners
+        elif name == 'AvailabilityZones':
+            return self.availability_zones
+        elif name == 'Instances':
+            self.instances = ResultSet([('member', InstanceInfo)])
+            return self.instances
+        elif name == 'Policies':
+            self.policies = Policies(self)
+            return self.policies
+        elif name == 'SourceSecurityGroup':
+            self.source_security_group = SecurityGroup()
+            return self.source_security_group
+        elif name == 'Subnets':
+            return self.subnets
+        elif name == 'SecurityGroups':
+            return self.security_groups
+        elif name == 'VPCId':
+            pass
+        elif name == "BackendServerDescriptions":
+            self.backends = ResultSet([('member', Backend)])
+            return self.backends
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'LoadBalancerName':
+            self.name = value
+        elif name == 'DNSName':
+            self.dns_name = value
+        elif name == 'CreatedTime':
+            self.created_time = value
+        elif name == 'InstanceId':
+            self.instances.append(value)
+        elif name == 'CanonicalHostedZoneName':
+            self.canonical_hosted_zone_name = value
+        elif name == 'CanonicalHostedZoneNameID':
+            self.canonical_hosted_zone_name_id = value
+        elif name == 'VPCId':
+            self.vpc_id = value
+        elif name == 'Scheme':
+            self.scheme = value
+        else:
+            setattr(self, name, value)
+
+    def enable_zones(self, zones):
+        """
+        Enable availability zones to this Access Point.
+        All zones must be in the same region as the Access Point.
+
+        :type zones: string or List of strings
+        :param zones: The name of the zone(s) to add.
+
+        """
+        if isinstance(zones, six.string_types):
+            zones = [zones]
+        new_zones = self.connection.enable_availability_zones(self.name, zones)
+        self.availability_zones = new_zones
+
+    def disable_zones(self, zones):
+        """
+        Disable availability zones from this Access Point.
+
+        :type zones: string or List of strings
+        :param zones: The name of the zone(s) to remove.
+
+        """
+        if isinstance(zones, six.string_types):
+            zones = [zones]
+        new_zones = self.connection.disable_availability_zones(
+            self.name, zones)
+        self.availability_zones = new_zones
+
+    def get_attributes(self, force=False):
+        """
+        Gets the LbAttributes. The Attributes will be cached.
+
+        :type force: bool
+        :param force: Ignore cache value and reload.
+
+        :rtype: boto.ec2.elb.attributes.LbAttributes
+        :return: The LbAttributes object
+        """
+        if not self._attributes or force:
+            self._attributes = self.connection.get_all_lb_attributes(self.name)
+        return self._attributes
+
+    def is_cross_zone_load_balancing(self, force=False):
+        """
+        Identifies if the ELB is currently configured to do CrossZone Balancing.
+
+        :type force: bool
+        :param force: Ignore cache value and reload.
+
+        :rtype: bool
+        :return: True if balancing is enabled, False if not.
+        """
+        return self.get_attributes(force).cross_zone_load_balancing.enabled
+
+    def enable_cross_zone_load_balancing(self):
+        """
+        Turns on CrossZone Load Balancing for this ELB.
+
+        :rtype: bool
+        :return: True if successful, False if not.
+        """
+        success = self.connection.modify_lb_attribute(
+            self.name, 'crossZoneLoadBalancing', True)
+        if success and self._attributes:
+            self._attributes.cross_zone_load_balancing.enabled = True
+        return success
+
+    def disable_cross_zone_load_balancing(self):
+        """
+        Turns off CrossZone Load Balancing for this ELB.
+
+        :rtype: bool
+        :return: True if successful, False if not.
+        """
+        success = self.connection.modify_lb_attribute(
+            self.name, 'crossZoneLoadBalancing', False)
+        if success and self._attributes:
+            self._attributes.cross_zone_load_balancing.enabled = False
+        return success
+
+    def register_instances(self, instances):
+        """
+        Adds instances to this load balancer. All instances must be in the same
+        region as the load balancer. Adding endpoints that are already
+        registered with the load balancer has no effect.
+
+        :param list instances: List of instance IDs (strings) that you'd like
+            to add to this load balancer.
+
+        """
+        if isinstance(instances, six.string_types):
+            instances = [instances]
+        new_instances = self.connection.register_instances(self.name,
+                                                           instances)
+        self.instances = new_instances
+
+    def deregister_instances(self, instances):
+        """
+        Remove instances from this load balancer. Removing instances that are
+        not registered with the load balancer has no effect.
+
+        :param list instances: List of instance IDs (strings) that you'd like
+            to remove from this load balancer.
+
+        """
+        if isinstance(instances, six.string_types):
+            instances = [instances]
+        new_instances = self.connection.deregister_instances(self.name,
+                                                             instances)
+        self.instances = new_instances
+
+    def delete(self):
+        """
+        Delete this load balancer.
+        """
+        return self.connection.delete_load_balancer(self.name)
+
+    def configure_health_check(self, health_check):
+        """
+        Configures the health check behavior for the instances behind this
+        load balancer. See :ref:`elb-configuring-a-health-check` for a
+        walkthrough.
+
+        :param boto.ec2.elb.healthcheck.HealthCheck health_check: A
+            HealthCheck instance that tells the load balancer how to check
+            its instances for health.
+        """
+        return self.connection.configure_health_check(self.name, health_check)
+
+    def get_instance_health(self, instances=None):
+        """
+        Returns a list of :py:class:`boto.ec2.elb.instancestate.InstanceState`
+        objects, which show the health of the instances attached to this
+        load balancer.
+
+        :rtype: list
+        :returns: A list of
+            :py:class:`InstanceState <boto.ec2.elb.instancestate.InstanceState>`
+            instances, representing the instances
+            attached to this load balancer.
+        """
+        return self.connection.describe_instance_health(self.name, instances)
+
+    def create_listeners(self, listeners):
+        return self.connection.create_load_balancer_listeners(self.name,
+                                                               listeners)
+
+    def create_listener(self, inPort, outPort=None, proto="tcp"):
+        if outPort is None:
+            outPort = inPort
+        return self.create_listeners([(inPort, outPort, proto)])
+
+    def delete_listeners(self, listeners):
+        return self.connection.delete_load_balancer_listeners(self.name,
+                                                              listeners)
+
+    def delete_listener(self, inPort):
+        return self.delete_listeners([inPort])
+
+    def delete_policy(self, policy_name):
+        """
+        Deletes a policy from the LoadBalancer.
The specified policy must not + be enabled for any listeners. + """ + return self.connection.delete_lb_policy(self.name, policy_name) + + def set_policies_of_listener(self, lb_port, policies): + return self.connection.set_lb_policies_of_listener(self.name, + lb_port, + policies) + + def set_policies_of_backend_server(self, instance_port, policies): + return self.connection.set_lb_policies_of_backend_server( + self.name, instance_port, policies) + + def create_cookie_stickiness_policy(self, cookie_expiration_period, + policy_name): + return self.connection.create_lb_cookie_stickiness_policy( + cookie_expiration_period, self.name, policy_name) + + def create_app_cookie_stickiness_policy(self, name, policy_name): + return self.connection.create_app_cookie_stickiness_policy(name, + self.name, + policy_name) + + def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id): + return self.connection.set_lb_listener_SSL_certificate( + self.name, lb_port, ssl_certificate_id) + + def create_lb_policy(self, policy_name, policy_type, policy_attribute): + return self.connection.create_lb_policy( + self.name, policy_name, policy_type, policy_attribute) + + def attach_subnets(self, subnets): + """ + Attaches load balancer to one or more subnets. + Attaching subnets that are already registered with the + Load Balancer has no effect. + + :type subnets: string or List of strings + :param subnets: The name of the subnet(s) to add. + + """ + if isinstance(subnets, six.string_types): + subnets = [subnets] + new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets) + self.subnets = new_subnets + + def detach_subnets(self, subnets): + """ + Detaches load balancer from one or more subnets. + + :type subnets: string or List of strings + :param subnets: The name of the subnet(s) to detach. + + """ + if isinstance(subnets, six.string_types): + subnets = [subnets] + new_subnets = self.connection.detach_lb_from_subnets( + self.name, subnets) + self.subnets = new_subnets + + def apply_security_groups(self, security_groups): + """ + Associates one or more security groups with the load balancer. + The provided security groups will override any currently applied + security groups. + + :type security_groups: string or List of strings + :param security_groups: The name of the security group(s) to add. + + """ + if isinstance(security_groups, six.string_types): + security_groups = [security_groups] + new_sgs = self.connection.apply_security_groups_to_lb( + self.name, security_groups) + self.security_groups = new_sgs diff --git a/ext/boto/ec2/elb/policies.py b/ext/boto/ec2/elb/policies.py new file mode 100644 index 0000000000..50fac0aff9 --- /dev/null +++ b/ext/boto/ec2/elb/policies.py @@ -0,0 +1,108 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.resultset import ResultSet + + +class AppCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.cookie_name = None + self.policy_name = None + + def __repr__(self): + return 'AppCookieStickiness(%s, %s)' % (self.policy_name, + self.cookie_name) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CookieName': + self.cookie_name = value + elif name == 'PolicyName': + self.policy_name = value + + +class LBCookieStickinessPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + self.cookie_expiration_period = None + + def __repr__(self): + return 'LBCookieStickiness(%s, %s)' % (self.policy_name, + self.cookie_expiration_period) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CookieExpirationPeriod': + self.cookie_expiration_period = value + elif name == 'PolicyName': + self.policy_name = value + + +class OtherPolicy(object): + def __init__(self, connection=None): + self.policy_name = None + + def __repr__(self): + return 'OtherPolicy(%s)' % (self.policy_name) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + self.policy_name = value + + +class Policies(object): + """ + ELB Policies + """ + def __init__(self, connection=None): + self.connection = connection + self.app_cookie_stickiness_policies = None + self.lb_cookie_stickiness_policies = None + self.other_policies = None + + def __repr__(self): + app = 'AppCookieStickiness%s' % self.app_cookie_stickiness_policies + lb = 'LBCookieStickiness%s' % self.lb_cookie_stickiness_policies + other = 'Other%s' % self.other_policies + return 'Policies(%s,%s,%s)' % (app, lb, other) + + def startElement(self, name, attrs, connection): + if name == 'AppCookieStickinessPolicies': + rs = ResultSet([('member', AppCookieStickinessPolicy)]) + self.app_cookie_stickiness_policies = rs + return rs + elif name == 'LBCookieStickinessPolicies': + rs = ResultSet([('member', LBCookieStickinessPolicy)]) + self.lb_cookie_stickiness_policies = rs + return rs + elif name == 'OtherPolicies': + rs = ResultSet([('member', OtherPolicy)]) + self.other_policies = rs + return rs + + def endElement(self, name, value, connection): + return diff --git a/ext/boto/ec2/elb/securitygroup.py b/ext/boto/ec2/elb/securitygroup.py new file mode 100644 index 0000000000..65f981f8e2 --- /dev/null +++ b/ext/boto/ec2/elb/securitygroup.py @@ -0,0 +1,38 @@ +# Copyright (c) 2010 Reza Lotun http://reza.lotun.name +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The 
above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class SecurityGroup(object): + def __init__(self, connection=None): + self.name = None + self.owner_alias = None + + def __repr__(self): + return 'SecurityGroup(%s, %s)' % (self.name, self.owner_alias) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'GroupName': + self.name = value + elif name == 'OwnerAlias': + self.owner_alias = value diff --git a/ext/boto/ec2/group.py b/ext/boto/ec2/group.py new file mode 100644 index 0000000000..99d7873421 --- /dev/null +++ b/ext/boto/ec2/group.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Group(object): + def __init__(self, parent=None): + self.id = None + self.name = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'groupId': + self.id = value + elif name == 'groupName': + self.name = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/image.py b/ext/boto/ec2/image.py new file mode 100644 index 0000000000..68ab03710f --- /dev/null +++ b/ext/boto/ec2/image.py @@ -0,0 +1,445 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.ec2object import EC2Object, TaggedEC2Object +from boto.ec2.blockdevicemapping import BlockDeviceMapping + + +class ProductCodes(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'productCode': + self.append(value) + + +class BillingProducts(list): + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'billingProduct': + self.append(value) + + +class Image(TaggedEC2Object): + """ + Represents an EC2 Image + """ + + def __init__(self, connection=None): + super(Image, self).__init__(connection) + self.id = None + self.location = None + self.state = None + self.ownerId = None # for backwards compatibility + self.owner_id = None + self.owner_alias = None + self.is_public = False + self.architecture = None + self.platform = None + self.type = None + self.kernel_id = None + self.ramdisk_id = None + self.name = None + self.description = None + self.product_codes = ProductCodes() + self.billing_products = BillingProducts() + self.block_device_mapping = None + self.root_device_type = None + self.root_device_name = None + self.virtualization_type = None + self.hypervisor = None + self.instance_lifecycle = None + self.sriov_net_support = None + + def __repr__(self): + return 'Image:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(Image, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + elif name == 'billingProducts': + return self.billing_products + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.id = value + elif name == 'imageLocation': + self.location = value + elif name == 'imageState': + self.state = value + elif name == 'imageOwnerId': + self.ownerId = value # for backwards compatibility + self.owner_id = value + elif name == 'isPublic': + if value == 'false': + self.is_public = False + elif value == 'true': + self.is_public = True + else: + raise Exception( + 'Unexpected value of isPublic %s for image %s' % ( + value, + self.id + ) + ) + elif name == 'architecture': + self.architecture = value + elif name == 'imageType': + self.type = value + elif name == 'kernelId': + self.kernel_id = value 
+ elif name == 'ramdiskId': + self.ramdisk_id = value + elif name == 'imageOwnerAlias': + self.owner_alias = value + elif name == 'platform': + self.platform = value + elif name == 'name': + self.name = value + elif name == 'description': + self.description = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'rootDeviceName': + self.root_device_name = value + elif name == 'virtualizationType': + self.virtualization_type = value + elif name == 'hypervisor': + self.hypervisor = value + elif name == 'instanceLifecycle': + self.instance_lifecycle = value + elif name == 'sriovNetSupport': + self.sriov_net_support = value + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the image's state information by making a call to fetch + the current image attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + image the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_images([self.id], dry_run=dry_run) + if len(rs) > 0: + img = rs[0] + if img.id == self.id: + self._update(img) + elif validate: + raise ValueError('%s is not a valid Image ID' % self.id) + return self.state + + def run(self, min_count=1, max_count=1, key_name=None, + security_groups=None, user_data=None, + addressing_type=None, instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None, + disable_api_termination=False, + instance_initiated_shutdown_behavior=None, + private_ip_address=None, + placement_group=None, security_group_ids=None, + additional_info=None, instance_profile_name=None, + instance_profile_arn=None, tenancy=None, dry_run=False): + + """ + Runs this instance. + + :type min_count: int + :param min_count: The minimum number of instances to start + + :type max_count: int + :param max_count: The maximum number of instances to start + + :type key_name: string + :param key_name: The name of the key pair with which to + launch instances. + + :type security_groups: list of strings + :param security_groups: The names of the security groups with which to + associate instances. + + :type user_data: string + :param user_data: The Base64-encoded MIME user data to be made + available to the instance(s) in this reservation. + + :type instance_type: string + :param instance_type: The type of instance to run: + + * t1.micro + * m1.small + * m1.medium + * m1.large + * m1.xlarge + * m3.medium + * m3.large + * m3.xlarge + * m3.2xlarge + * c1.medium + * c1.xlarge + * m2.xlarge + * m2.2xlarge + * m2.4xlarge + * cr1.8xlarge + * hi1.4xlarge + * hs1.8xlarge + * cc1.4xlarge + * cg1.4xlarge + * cc2.8xlarge + * g2.2xlarge + * c3.large + * c3.xlarge + * c3.2xlarge + * c3.4xlarge + * c3.8xlarge + * c4.large + * c4.xlarge + * c4.2xlarge + * c4.4xlarge + * c4.8xlarge + * i2.xlarge + * i2.2xlarge + * i2.4xlarge + * i2.8xlarge + * t2.micro + * t2.small + * t2.medium + + :type placement: string + :param placement: The Availability Zone to launch the instance into. + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the + instances. + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the + instances. 
+ + :type monitoring_enabled: bool + :param monitoring_enabled: Enable CloudWatch monitoring on + the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances + for VPC. + + :type private_ip_address: string + :param private_ip_address: If you're using VPC, you can + optionally use this parameter to assign the instance a + specific available IP address from the subnet (e.g., + 10.0.0.25). + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + + :type disable_api_termination: bool + :param disable_api_termination: If True, the instances will be locked + and will not be able to be terminated via the API. + + :type instance_initiated_shutdown_behavior: string + :param instance_initiated_shutdown_behavior: Specifies whether the + instance stops or terminates on instance-initiated shutdown. + Valid values are: + + * stop + * terminate + + :type placement_group: string + :param placement_group: If specified, this is the name of the placement + group in which the instance(s) will be launched. + + :type additional_info: string + :param additional_info: Specifies additional information to make + available to the instance(s). + + :type security_group_ids: list of strings + :param security_group_ids: The ID of the VPC security groups with + which to associate instances. + + :type instance_profile_name: string + :param instance_profile_name: The name of + the IAM Instance Profile (IIP) to associate with the instances. + + :type instance_profile_arn: string + :param instance_profile_arn: The Amazon resource name (ARN) of + the IAM Instance Profile (IIP) to associate with the instances. + + :type tenancy: string + :param tenancy: The tenancy of the instance you want to + launch. An instance with a tenancy of 'dedicated' runs on + single-tenant hardware and can only be launched into a + VPC. Valid values are:"default" or "dedicated". + NOTE: To use dedicated tenancy you MUST specify a VPC + subnet-ID as well. 
+ + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with + the request for machines + + """ + + return self.connection.run_instances(self.id, min_count, max_count, + key_name, security_groups, + user_data, addressing_type, + instance_type, placement, + kernel_id, ramdisk_id, + monitoring_enabled, subnet_id, + block_device_map, disable_api_termination, + instance_initiated_shutdown_behavior, + private_ip_address, placement_group, + security_group_ids=security_group_ids, + additional_info=additional_info, + instance_profile_name=instance_profile_name, + instance_profile_arn=instance_profile_arn, + tenancy=tenancy, dry_run=dry_run) + + def deregister(self, delete_snapshot=False, dry_run=False): + return self.connection.deregister_image( + self.id, + delete_snapshot, + dry_run=dry_run + ) + + def get_launch_permissions(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'launchPermission', + dry_run=dry_run + ) + return img_attrs.attrs + + def set_launch_permissions(self, user_ids=None, group_names=None, + dry_run=False): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'add', + user_ids, + group_names, + dry_run=dry_run) + + def remove_launch_permissions(self, user_ids=None, group_names=None, + dry_run=False): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'remove', + user_ids, + group_names, + dry_run=dry_run) + + def reset_launch_attributes(self, dry_run=False): + return self.connection.reset_image_attribute( + self.id, + 'launchPermission', + dry_run=dry_run + ) + + def get_kernel(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'kernel', + dry_run=dry_run + ) + return img_attrs.kernel + + def get_ramdisk(self, dry_run=False): + img_attrs = self.connection.get_image_attribute( + self.id, + 'ramdisk', + dry_run=dry_run + ) + return img_attrs.ramdisk + + +class ImageAttribute(object): + def __init__(self, parent=None): + self.name = None + self.kernel = None + self.ramdisk = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self.attrs['block_device_mapping'] = BlockDeviceMapping() + return self.attrs['block_device_mapping'] + else: + return None + + def endElement(self, name, value, connection): + if name == 'launchPermission': + self.name = 'launch_permission' + elif name == 'group': + if 'groups' in self.attrs: + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if 'user_ids' in self.attrs: + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'productCode': + if 'product_codes' in self.attrs: + self.attrs['product_codes'].append(value) + else: + self.attrs['product_codes'] = [value] + elif name == 'imageId': + self.image_id = value + elif name == 'kernel': + self.kernel = value + elif name == 'ramdisk': + self.ramdisk = value + else: + setattr(self, name, value) + + +class CopyImage(object): + def __init__(self, parent=None): + self._parent = parent + self.image_id = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'imageId': + self.image_id = value diff --git a/ext/boto/ec2/instance.py b/ext/boto/ec2/instance.py new file mode 100644 index 0000000000..ae0056c333 --- /dev/null +++ b/ext/boto/ec2/instance.py @@ -0,0 +1,677 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ 
+# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Instance +""" +import boto +from boto.ec2.ec2object import EC2Object, TaggedEC2Object +from boto.resultset import ResultSet +from boto.ec2.address import Address +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.image import ProductCodes +from boto.ec2.networkinterface import NetworkInterface +from boto.ec2.group import Group +import base64 + + +class InstanceState(object): + """ + The state of the instance. + + :ivar code: The low byte represents the state. The high byte is an + opaque internal value and should be ignored. Valid values: + + * 0 (pending) + * 16 (running) + * 32 (shutting-down) + * 48 (terminated) + * 64 (stopping) + * 80 (stopped) + + :ivar name: The name of the state of the instance. Valid values: + + * "pending" + * "running" + * "shutting-down" + * "terminated" + * "stopping" + * "stopped" + """ + def __init__(self, code=0, name=None): + self.code = code + self.name = name + + def __repr__(self): + return '%s(%d)' % (self.name, self.code) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'code': + self.code = int(value) + elif name == 'name': + self.name = value + else: + setattr(self, name, value) + + +class InstancePlacement(object): + """ + The location where the instance launched. + + :ivar zone: The Availability Zone of the instance. + :ivar group_name: The name of the placement group the instance is + in (for cluster compute instances). + :ivar tenancy: The tenancy of the instance (if the instance is + running within a VPC). An instance with a tenancy of dedicated + runs on single-tenant hardware. + """ + def __init__(self, zone=None, group_name=None, tenancy=None): + self.zone = zone + self.group_name = group_name + self.tenancy = tenancy + + def __repr__(self): + return self.zone + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'availabilityZone': + self.zone = value + elif name == 'groupName': + self.group_name = value + elif name == 'tenancy': + self.tenancy = value + else: + setattr(self, name, value) + + +class Reservation(EC2Object): + """ + Represents a Reservation response object. + + :ivar id: The unique ID of the Reservation. + :ivar owner_id: The unique ID of the owner of the Reservation. 
+    :ivar groups: A list of Group objects representing the security
+        groups associated with launched instances.
+    :ivar instances: A list of Instance objects launched in this
+        Reservation.
+    """
+    def __init__(self, connection=None):
+        super(Reservation, self).__init__(connection)
+        self.id = None
+        self.owner_id = None
+        self.groups = []
+        self.instances = []
+
+    def __repr__(self):
+        return 'Reservation:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        if name == 'instancesSet':
+            self.instances = ResultSet([('item', Instance)])
+            return self.instances
+        elif name == 'groupSet':
+            self.groups = ResultSet([('item', Group)])
+            return self.groups
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'reservationId':
+            self.id = value
+        elif name == 'ownerId':
+            self.owner_id = value
+        else:
+            setattr(self, name, value)
+
+    def stop_all(self, dry_run=False):
+        for instance in self.instances:
+            instance.stop(dry_run=dry_run)
+
+
+class Instance(TaggedEC2Object):
+    """
+    Represents an instance.
+
+    :ivar id: The unique ID of the Instance.
+    :ivar groups: A list of Group objects representing the security
+        groups associated with the instance.
+    :ivar public_dns_name: The public dns name of the instance.
+    :ivar private_dns_name: The private dns name of the instance.
+    :ivar state: The string representation of the instance's current state.
+    :ivar state_code: An integer representation of the instance's
+        current state.
+    :ivar previous_state: The string representation of the instance's
+        previous state.
+    :ivar previous_state_code: An integer representation of the
+        instance's previous state.
+    :ivar key_name: The name of the SSH key associated with the instance.
+    :ivar instance_type: The type of instance (e.g. m1.small).
+    :ivar launch_time: The time the instance was launched.
+    :ivar image_id: The ID of the AMI used to launch this instance.
+    :ivar placement: The availability zone in which the instance is running.
+    :ivar placement_group: The name of the placement group the instance
+        is in (for cluster compute instances).
+    :ivar placement_tenancy: The tenancy of the instance, if the instance
+        is running within a VPC. An instance with a tenancy of dedicated
+        runs on single-tenant hardware.
+    :ivar kernel: The kernel associated with the instance.
+    :ivar ramdisk: The ramdisk associated with the instance.
+    :ivar architecture: The architecture of the image (i386|x86_64).
+    :ivar hypervisor: The hypervisor used.
+    :ivar virtualization_type: The type of virtualization used.
+    :ivar product_codes: A list of product codes associated with this instance.
+    :ivar ami_launch_index: This instance's position within its launch group.
+    :ivar monitored: A boolean indicating whether monitoring is enabled or not.
+    :ivar monitoring_state: A string value that contains the actual value
+        of the monitoring element returned by EC2.
+    :ivar spot_instance_request_id: The ID of the spot instance request
+        if this is a spot instance.
+    :ivar subnet_id: The VPC Subnet ID, if running in VPC.
+    :ivar vpc_id: The VPC ID, if running in VPC.
+    :ivar private_ip_address: The private IP address of the instance.
+    :ivar ip_address: The public IP address of the instance.
+    :ivar platform: Platform of the instance (e.g. Windows)
+    :ivar root_device_name: The name of the root device.
+    :ivar root_device_type: The root device type (ebs|instance-store).
+    :ivar block_device_mapping: The Block Device Mapping for the instance.
+ :ivar state_reason: The reason for the most recent state transition. + :ivar interfaces: List of Elastic Network Interfaces associated with + this instance. + :ivar ebs_optimized: Whether instance is using optimized EBS volumes + or not. + :ivar instance_profile: A Python dict containing the instance + profile id and arn associated with this instance. + """ + + def __init__(self, connection=None): + super(Instance, self).__init__(connection) + self.id = None + self.dns_name = None + self.public_dns_name = None + self.private_dns_name = None + self.key_name = None + self.instance_type = None + self.launch_time = None + self.image_id = None + self.kernel = None + self.ramdisk = None + self.product_codes = ProductCodes() + self.ami_launch_index = None + self.monitored = False + self.monitoring_state = None + self.spot_instance_request_id = None + self.subnet_id = None + self.vpc_id = None + self.private_ip_address = None + self.ip_address = None + self.requester_id = None + self._in_monitoring_element = False + self.persistent = False + self.root_device_name = None + self.root_device_type = None + self.block_device_mapping = None + self.state_reason = None + self.group_name = None + self.client_token = None + self.eventsSet = None + self.groups = [] + self.platform = None + self.interfaces = [] + self.hypervisor = None + self.virtualization_type = None + self.architecture = None + self.instance_profile = None + self._previous_state = None + self._state = InstanceState() + self._placement = InstancePlacement() + + def __repr__(self): + return 'Instance:%s' % self.id + + @property + def state(self): + return self._state.name + + @property + def state_code(self): + return self._state.code + + @property + def previous_state(self): + if self._previous_state: + return self._previous_state.name + return None + + @property + def previous_state_code(self): + if self._previous_state: + return self._previous_state.code + return 0 + + @property + def placement(self): + return self._placement.zone + + @property + def placement_group(self): + return self._placement.group_name + + @property + def placement_tenancy(self): + return self._placement.tenancy + + def startElement(self, name, attrs, connection): + retval = super(Instance, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + elif name == 'stateReason': + self.state_reason = SubParse('stateReason') + return self.state_reason + elif name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == "eventsSet": + self.eventsSet = SubParse('eventsSet') + return self.eventsSet + elif name == 'networkInterfaceSet': + self.interfaces = ResultSet([('item', NetworkInterface)]) + return self.interfaces + elif name == 'iamInstanceProfile': + self.instance_profile = SubParse('iamInstanceProfile') + return self.instance_profile + elif name == 'currentState': + return self._state + elif name == 'previousState': + self._previous_state = InstanceState() + return self._previous_state + elif name == 'instanceState': + return self._state + elif name == 'placement': + return self._placement + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.id = value + elif name == 'imageId': + self.image_id = value + elif 
name == 'dnsName' or name == 'publicDnsName': + self.dns_name = value # backwards compatibility + self.public_dns_name = value + elif name == 'privateDnsName': + self.private_dns_name = value + elif name == 'keyName': + self.key_name = value + elif name == 'amiLaunchIndex': + self.ami_launch_index = value + elif name == 'previousState': + self.previous_state = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'rootDeviceName': + self.root_device_name = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'launchTime': + self.launch_time = value + elif name == 'platform': + self.platform = value + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'state': + if self._in_monitoring_element: + self.monitoring_state = value + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + elif name == 'spotInstanceRequestId': + self.spot_instance_request_id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'requesterId': + self.requester_id = value + elif name == 'persistent': + if value == 'true': + self.persistent = True + else: + self.persistent = False + elif name == 'groupName': + if self._in_monitoring_element: + self.group_name = value + elif name == 'clientToken': + self.client_token = value + elif name == "eventsSet": + self.events = value + elif name == 'hypervisor': + self.hypervisor = value + elif name == 'virtualizationType': + self.virtualization_type = value + elif name == 'architecture': + self.architecture = value + elif name == 'ebsOptimized': + self.ebs_optimized = (value == 'true') + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the instance's state information by making a call to fetch + the current instance attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_reservations([self.id], dry_run=dry_run) + if len(rs) > 0: + r = rs[0] + for i in r.instances: + if i.id == self.id: + self._update(i) + elif validate: + raise ValueError('%s is not a valid Instance ID' % self.id) + return self.state + + def terminate(self, dry_run=False): + """ + Terminate the instance + """ + rs = self.connection.terminate_instances([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def stop(self, force=False, dry_run=False): + """ + Stop the instance + + :type force: bool + :param force: Forces the instance to stop + + :rtype: list + :return: A list of the instances stopped + """ + rs = self.connection.stop_instances([self.id], force, dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def start(self, dry_run=False): + """ + Start the instance. 
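+
+        A minimal sketch of typical use (assumes ``instance`` is a stopped
+        Instance previously fetched from the connection)::
+
+            instance.start()
+            instance.update()  # refresh state; repeat until it is 'running'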
+ """ + rs = self.connection.start_instances([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + + def reboot(self, dry_run=False): + return self.connection.reboot_instances([self.id], dry_run=dry_run) + + def get_console_output(self, dry_run=False): + """ + Retrieves the console output for the instance. + + :rtype: :class:`boto.ec2.instance.ConsoleOutput` + :return: The console output as a ConsoleOutput object + """ + return self.connection.get_console_output(self.id, dry_run=dry_run) + + def confirm_product(self, product_code, dry_run=False): + return self.connection.confirm_product_instance( + self.id, + product_code, + dry_run=dry_run + ) + + def use_ip(self, ip_address, dry_run=False): + """ + Associates an Elastic IP to the instance. + + :type ip_address: Either an instance of + :class:`boto.ec2.address.Address` or a string. + :param ip_address: The IP address to associate + with the instance. + + :rtype: bool + :return: True if successful + """ + + if isinstance(ip_address, Address): + ip_address = ip_address.public_ip + return self.connection.associate_address( + self.id, + ip_address, + dry_run=dry_run + ) + + def monitor(self, dry_run=False): + return self.connection.monitor_instance(self.id, dry_run=dry_run) + + def unmonitor(self, dry_run=False): + return self.connection.unmonitor_instance(self.id, dry_run=dry_run) + + def get_attribute(self, attribute, dry_run=False): + """ + Gets an attribute from this instance. + + :type attribute: string + :param attribute: The attribute you need information about + Valid choices are: + + * instanceType + * kernel + * ramdisk + * userData + * disableApiTermination + * instanceInitiatedShutdownBehavior + * rootDeviceName + * blockDeviceMapping + * productCodes + * sourceDestCheck + * groupSet + * ebsOptimized + + :rtype: :class:`boto.ec2.image.InstanceAttribute` + :return: An InstanceAttribute object representing the value of the + attribute requested + """ + return self.connection.get_instance_attribute( + self.id, + attribute, + dry_run=dry_run + ) + + def modify_attribute(self, attribute, value, dry_run=False): + """ + Changes an attribute of this instance + + :type attribute: string + :param attribute: The attribute you wish to change. + + * instanceType - A valid instance type (m1.small) + * kernel - Kernel ID (None) + * ramdisk - Ramdisk ID (None) + * userData - Base64 encoded String (None) + * disableApiTermination - Boolean (true) + * instanceInitiatedShutdownBehavior - stop|terminate + * sourceDestCheck - Boolean (true) + * groupSet - Set of Security Groups or IDs + * ebsOptimized - Boolean (false) + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + """ + return self.connection.modify_instance_attribute( + self.id, + attribute, + value, + dry_run=dry_run + ) + + def reset_attribute(self, attribute, dry_run=False): + """ + Resets an attribute of this instance to its default value. + + :type attribute: string + :param attribute: The attribute to reset. Valid values are: + kernel|ramdisk + + :rtype: bool + :return: Whether the operation succeeded or not + """ + return self.connection.reset_instance_attribute( + self.id, + attribute, + dry_run=dry_run + ) + + def create_image(self, name, description=None, no_reboot=False, + dry_run=False): + """ + Will create an AMI from the instance in the running or stopped + state. 
+ + :type name: string + :param name: The name of the new image + + :type description: string + :param description: An optional human-readable string describing + the contents and purpose of the AMI. + + :type no_reboot: bool + :param no_reboot: An optional flag indicating that the bundling process + should not attempt to shutdown the instance before + bundling. If this flag is True, the responsibility + of maintaining file system integrity is left to the + owner of the instance. + + :rtype: string + :return: The new image id + """ + return self.connection.create_image( + self.id, + name, + description, + no_reboot, + dry_run=dry_run + ) + + +class ConsoleOutput(object): + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.output = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'timestamp': + self.timestamp = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + + +class InstanceAttribute(dict): + ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData', + 'disableApiTermination', + 'instanceInitiatedShutdownBehavior', + 'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck', + 'groupSet'] + + def __init__(self, parent=None): + dict.__init__(self) + self.instance_id = None + self.request_id = None + self._current_value = None + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self[name] = BlockDeviceMapping() + return self[name] + elif name == 'groupSet': + self[name] = ResultSet([('item', Group)]) + return self[name] + else: + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'requestId': + self.request_id = value + elif name == 'value': + if value == 'true': + value = True + elif value == 'false': + value = False + self._current_value = value + elif name in self.ValidValues: + self[name] = self._current_value + + +class SubParse(dict): + def __init__(self, section, parent=None): + dict.__init__(self) + self.section = section + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name != self.section: + self[name] = value diff --git a/ext/boto/ec2/instanceinfo.py b/ext/boto/ec2/instanceinfo.py new file mode 100644 index 0000000000..afa8b9cba0 --- /dev/null +++ b/ext/boto/ec2/instanceinfo.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class InstanceInfo(object): + """ + Represents an EC2 Instance status response from CloudWatch + """ + + def __init__(self, connection=None, id=None, state=None): + """ + :ivar str id: The instance's EC2 ID. + :ivar str state: Specifies the current status of the instance. + """ + self.connection = connection + self.id = id + self.state = state + + def __repr__(self): + return 'InstanceInfo:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId' or name == 'InstanceId': + self.id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/instancestatus.py b/ext/boto/ec2/instancestatus.py new file mode 100644 index 0000000000..b09b55ee80 --- /dev/null +++ b/ext/boto/ec2/instancestatus.py @@ -0,0 +1,212 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Details(dict): + """ + A dict object that contains name/value pairs which provide + more detailed information about the status of the system + or the instance. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'name': + self._name = value + elif name == 'status': + self[self._name] = value + else: + setattr(self, name, value) + + +class Event(object): + """ + A status event for an instance. + + :ivar code: A string indicating the event type. + :ivar description: A string describing the reason for the event. + :ivar not_before: A datestring describing the earliest time for + the event. + :ivar not_after: A datestring describing the latest time for + the event. 
+ """ + + def __init__(self, code=None, description=None, + not_before=None, not_after=None): + self.code = code + self.description = description + self.not_before = not_before + self.not_after = not_after + + def __repr__(self): + return 'Event:%s' % self.code + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'description': + self.description = value + elif name == 'notBefore': + self.not_before = value + elif name == 'notAfter': + self.not_after = value + else: + setattr(self, name, value) + + +class Status(object): + """ + A generic Status object used for system status and instance status. + + :ivar status: A string indicating overall status. + :ivar details: A dict containing name-value pairs which provide + more details about the current status. + """ + + def __init__(self, status=None, details=None): + self.status = status + if not details: + details = Details() + self.details = details + + def __repr__(self): + return 'Status:%s' % self.status + + def startElement(self, name, attrs, connection): + if name == 'details': + return self.details + return None + + def endElement(self, name, value, connection): + if name == 'status': + self.status = value + else: + setattr(self, name, value) + + +class EventSet(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + event = Event() + self.append(event) + return event + else: + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class InstanceStatus(object): + """ + Represents an EC2 Instance status as reported by + DescribeInstanceStatus request. + + :ivar id: The instance identifier. + :ivar zone: The availability zone of the instance. + :ivar events: A list of events relevant to the instance. + :ivar state_code: An integer representing the current state + of the instance. + :ivar state_name: A string describing the current state + of the instance. + :ivar system_status: A Status object that reports impaired + functionality that stems from issues related to the systems + that support an instance, such as such as hardware failures + and network connectivity problems. + :ivar instance_status: A Status object that reports impaired + functionality that arises from problems internal to the instance. + """ + + def __init__(self, id=None, zone=None, events=None, + state_code=None, state_name=None): + self.id = id + self.zone = zone + self.events = events + self.state_code = state_code + self.state_name = state_name + self.system_status = Status() + self.instance_status = Status() + + def __repr__(self): + return 'InstanceStatus:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'eventsSet': + self.events = EventSet() + return self.events + elif name == 'systemStatus': + return self.system_status + elif name == 'instanceStatus': + return self.instance_status + else: + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.id = value + elif name == 'availabilityZone': + self.zone = value + elif name == 'code': + self.state_code = int(value) + elif name == 'name': + self.state_name = value + else: + setattr(self, name, value) + + +class InstanceStatusSet(list): + """ + A list object that contains the results of a call to + DescribeInstanceStatus request. Each element of the + list will be an InstanceStatus object. 
+ + :ivar next_token: If the response was truncated by + the EC2 service, the next_token attribute of the + object will contain the string that needs to be + passed in to the next request to retrieve the next + set of results. + """ + + def __init__(self, connection=None): + list.__init__(self) + self.connection = connection + self.next_token = None + + def startElement(self, name, attrs, connection): + if name == 'item': + status = InstanceStatus() + self.append(status) + return status + else: + return None + + def endElement(self, name, value, connection): + if name == 'nextToken': + self.next_token = value + setattr(self, name, value) diff --git a/ext/boto/ec2/instancetype.py b/ext/boto/ec2/instancetype.py new file mode 100644 index 0000000000..6197a3363b --- /dev/null +++ b/ext/boto/ec2/instancetype.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
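As an illustration (not part of the vendored sources), a minimal sketch of how the InstanceStatus/InstanceStatusSet classes above are typically consumed, assuming boto's EC2Connection.get_all_instance_status (which is not shown in this diff) and a placeholder region:

import boto.ec2

# Assumes AWS credentials are already configured; the region is a placeholder.
conn = boto.ec2.connect_to_region('us-east-1')

status_set = conn.get_all_instance_status()
while True:
    for status in status_set:
        # InstanceStatus exposes the parsed systemStatus/instanceStatus blocks.
        print('%s %s system=%s instance=%s' % (
            status.id, status.state_name,
            status.system_status.status, status.instance_status.status))
    if not status_set.next_token:
        break  # last page reached
    status_set = conn.get_all_instance_status(next_token=status_set.next_token)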
+ + +from boto.ec2.ec2object import EC2Object + + +class InstanceType(EC2Object): + """ + Represents an EC2 VM Type + + :ivar name: The name of the vm type + :ivar cores: The number of cpu cores for this vm type + :ivar memory: The amount of memory in megabytes for this vm type + :ivar disk: The amount of disk space in gigabytes for this vm type + """ + + def __init__(self, connection=None, name=None, cores=None, + memory=None, disk=None): + super(InstanceType, self).__init__(connection) + self.connection = connection + self.name = name + self.cores = cores + self.memory = memory + self.disk = disk + + def __repr__(self): + return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores, + self.memory, self.disk) + + def endElement(self, name, value, connection): + if name == 'name': + self.name = value + elif name == 'cpu': + self.cores = value + elif name == 'disk': + self.disk = value + elif name == 'memory': + self.memory = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/keypair.py b/ext/boto/ec2/keypair.py new file mode 100644 index 0000000000..623fb409f8 --- /dev/null +++ b/ext/boto/ec2/keypair.py @@ -0,0 +1,111 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Keypair +""" + +import os +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + + +class KeyPair(EC2Object): + + def __init__(self, connection=None): + super(KeyPair, self).__init__(connection) + self.name = None + self.fingerprint = None + self.material = None + + def __repr__(self): + return 'KeyPair:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'keyName': + self.name = value + elif name == 'keyFingerprint': + self.fingerprint = value + elif name == 'keyMaterial': + self.material = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + """ + Delete the KeyPair. + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.delete_key_pair(self.name, dry_run=dry_run) + + def save(self, directory_path): + """ + Save the material (the unencrypted PEM encoded RSA private key) + of a newly created KeyPair to a local file. + + :type directory_path: string + :param directory_path: The fully qualified path to the directory + in which the keypair will be saved. The + keypair file will be named using the name + of the keypair as the base name and .pem + for the file extension. 
If a file of that
+                               name already exists in the directory, an
+                               exception will be raised and the old file
+                               will not be overwritten.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if self.material:
+            directory_path = os.path.expanduser(directory_path)
+            file_path = os.path.join(directory_path, '%s.pem' % self.name)
+            if os.path.exists(file_path):
+                raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
+            fp = open(file_path, 'wb')
+            fp.write(self.material)
+            fp.close()
+            os.chmod(file_path, 0o600)
+            return True
+        else:
+            raise BotoClientError('KeyPair contains no material')
+
+    def copy_to_region(self, region, dry_run=False):
+        """
+        Create a new key pair of the same name in another region.
+        Note that the new key pair will use a different ssh
+        cert than this key pair.  After doing the copy,
+        you will need to save the material associated with the
+        new key pair (use the save method) to a local file.
+
+        :type region: :class:`boto.ec2.regioninfo.RegionInfo`
+        :param region: The region to which this key pair will be copied.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The new key pair
+        """
+        if region.name == self.region:
+            raise BotoClientError('Unable to copy to the same Region')
+        conn_params = self.connection.get_params()
+        rconn = region.connect(**conn_params)
+        kp = rconn.create_key_pair(self.name, dry_run=dry_run)
+        return kp
diff --git a/ext/boto/ec2/launchspecification.py b/ext/boto/ec2/launchspecification.py
new file mode 100644
index 0000000000..f145ac4799
--- /dev/null
+++ b/ext/boto/ec2/launchspecification.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.  All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a launch specification for Spot instances.
+""" + +from boto.ec2.ec2object import EC2Object +from boto.resultset import ResultSet +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.group import Group +from boto.ec2.instance import SubParse + + +class GroupList(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'groupId': + self.append(value) + + +class LaunchSpecification(EC2Object): + + def __init__(self, connection=None): + super(LaunchSpecification, self).__init__(connection) + self.key_name = None + self.instance_type = None + self.image_id = None + self.groups = [] + self.placement = None + self.kernel = None + self.ramdisk = None + self.monitored = False + self.subnet_id = None + self._in_monitoring_element = False + self.block_device_mapping = None + self.instance_profile = None + self.ebs_optimized = False + + def __repr__(self): + return 'LaunchSpecification(%s)' % self.image_id + + def startElement(self, name, attrs, connection): + if name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'iamInstanceProfile': + self.instance_profile = SubParse('iamInstanceProfile') + return self.instance_profile + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.image_id = value + elif name == 'keyName': + self.key_name = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.placement = value + elif name == 'placement': + pass + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'state': + if self._in_monitoring_element: + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + elif name == 'ebsOptimized': + self.ebs_optimized = (value == 'true') + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/networkinterface.py b/ext/boto/ec2/networkinterface.py new file mode 100644 index 0000000000..9bbeb7715e --- /dev/null +++ b/ext/boto/ec2/networkinterface.py @@ -0,0 +1,351 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an EC2 Elastic Network Interface +""" +from boto.exception import BotoClientError +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet +from boto.ec2.group import Group + + +class Attachment(object): + """ + :ivar id: The ID of the attachment. + :ivar instance_id: The ID of the instance. + :ivar device_index: The index of this device. + :ivar status: The status of the device. + :ivar attach_time: The time the device was attached. + :ivar delete_on_termination: Whether the device will be deleted + when the instance is terminated. + """ + + def __init__(self): + self.id = None + self.instance_id = None + self.instance_owner_id = None + self.device_index = 0 + self.status = None + self.attach_time = None + self.delete_on_termination = False + + def __repr__(self): + return 'Attachment:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'attachmentId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'deviceIndex': + self.device_index = int(value) + elif name == 'instanceOwnerId': + self.instance_owner_id = value + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'deleteOnTermination': + if value.lower() == 'true': + self.delete_on_termination = True + else: + self.delete_on_termination = False + else: + setattr(self, name, value) + + +class NetworkInterface(TaggedEC2Object): + """ + An Elastic Network Interface. + + :ivar id: The ID of the ENI. + :ivar subnet_id: The ID of the VPC subnet. + :ivar vpc_id: The ID of the VPC. + :ivar description: The description. + :ivar owner_id: The ID of the owner of the ENI. + :ivar requester_managed: + :ivar status: The interface's status (available|in-use). + :ivar mac_address: The MAC address of the interface. + :ivar private_ip_address: The IP address of the interface within + the subnet. + :ivar source_dest_check: Flag to indicate whether to validate + network traffic to or from this network interface. + :ivar groups: List of security groups associated with the interface. + :ivar attachment: The attachment object. + :ivar private_ip_addresses: A list of PrivateIPAddress objects. 
+ """ + + def __init__(self, connection=None): + super(NetworkInterface, self).__init__(connection) + self.id = None + self.subnet_id = None + self.vpc_id = None + self.availability_zone = None + self.description = None + self.owner_id = None + self.requester_managed = False + self.status = None + self.mac_address = None + self.private_ip_address = None + self.source_dest_check = None + self.groups = [] + self.attachment = None + self.private_ip_addresses = [] + + def __repr__(self): + return 'NetworkInterface:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(NetworkInterface, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == 'attachment': + self.attachment = Attachment() + return self.attachment + elif name == 'privateIpAddressesSet': + self.private_ip_addresses = ResultSet([('item', PrivateIPAddress)]) + return self.private_ip_addresses + else: + return None + + def endElement(self, name, value, connection): + if name == 'networkInterfaceId': + self.id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'description': + self.description = value + elif name == 'ownerId': + self.owner_id = value + elif name == 'requesterManaged': + if value.lower() == 'true': + self.requester_managed = True + else: + self.requester_managed = False + elif name == 'status': + self.status = value + elif name == 'macAddress': + self.mac_address = value + elif name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'sourceDestCheck': + if value.lower() == 'true': + self.source_dest_check = True + else: + self.source_dest_check = False + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the data associated with this ENI by querying EC2. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + ENI the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_network_interfaces( + [self.id], + dry_run=dry_run + ) + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid ENI ID' % self.id) + return self.status + + def attach(self, instance_id, device_index, dry_run=False): + """ + Attach this ENI to an EC2 instance. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device_index: int + :param device_index: The interface nunber, N, on the instance (eg. ethN) + + :rtype: bool + :return: True if successful + """ + return self.connection.attach_network_interface( + self.id, + instance_id, + device_index, + dry_run=dry_run + ) + + def detach(self, force=False, dry_run=False): + """ + Detach this ENI from an EC2 instance. + + :type force: bool + :param force: Forces detachment if the previous detachment + attempt did not occur cleanly. 
+ + :rtype: bool + :return: True if successful + """ + attachment_id = getattr(self.attachment, 'id', None) + + return self.connection.detach_network_interface( + attachment_id, + force, + dry_run=dry_run + ) + + def delete(self, dry_run=False): + return self.connection.delete_network_interface( + self.id, + dry_run=dry_run + ) + + +class PrivateIPAddress(object): + def __init__(self, connection=None, private_ip_address=None, + primary=None): + self.connection = connection + self.private_ip_address = private_ip_address + self.primary = primary + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'primary': + self.primary = True if value.lower() == 'true' else False + + def __repr__(self): + return "PrivateIPAddress(%s, primary=%s)" % (self.private_ip_address, + self.primary) + + +class NetworkInterfaceCollection(list): + def __init__(self, *interfaces): + self.extend(interfaces) + + def build_list_params(self, params, prefix=''): + for i, spec in enumerate(self): + full_prefix = '%sNetworkInterface.%s.' % (prefix, i) + if spec.network_interface_id is not None: + params[full_prefix + 'NetworkInterfaceId'] = \ + str(spec.network_interface_id) + if spec.device_index is not None: + params[full_prefix + 'DeviceIndex'] = \ + str(spec.device_index) + else: + params[full_prefix + 'DeviceIndex'] = 0 + if spec.subnet_id is not None: + params[full_prefix + 'SubnetId'] = str(spec.subnet_id) + if spec.description is not None: + params[full_prefix + 'Description'] = str(spec.description) + if spec.delete_on_termination is not None: + params[full_prefix + 'DeleteOnTermination'] = \ + 'true' if spec.delete_on_termination else 'false' + if spec.secondary_private_ip_address_count is not None: + params[full_prefix + 'SecondaryPrivateIpAddressCount'] = \ + str(spec.secondary_private_ip_address_count) + if spec.private_ip_address is not None: + params[full_prefix + 'PrivateIpAddress'] = \ + str(spec.private_ip_address) + if spec.groups is not None: + for j, group_id in enumerate(spec.groups): + query_param_key = '%sSecurityGroupId.%s' % (full_prefix, j) + params[query_param_key] = str(group_id) + if spec.private_ip_addresses is not None: + for k, ip_addr in enumerate(spec.private_ip_addresses): + query_param_key_prefix = ( + '%sPrivateIpAddresses.%s' % (full_prefix, k)) + params[query_param_key_prefix + '.PrivateIpAddress'] = \ + str(ip_addr.private_ip_address) + if ip_addr.primary is not None: + params[query_param_key_prefix + '.Primary'] = \ + 'true' if ip_addr.primary else 'false' + + # Associating Public IPs have special logic around them: + # + # * Only assignable on an device_index of ``0`` + # * Only on one interface + # * Only if there are no other interfaces being created + # * Only if it's a new interface (which we can't really guard + # against) + # + # More details on http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-RunInstances.html + if spec.associate_public_ip_address is not None: + if not params[full_prefix + 'DeviceIndex'] in (0, '0'): + raise BotoClientError( + "Only the interface with device index of 0 can " + \ + "be provided when using " + \ + "'associate_public_ip_address'." + ) + + if len(self) > 1: + raise BotoClientError( + "Only one interface can be provided when using " + \ + "'associate_public_ip_address'." 
+ ) + + key = full_prefix + 'AssociatePublicIpAddress' + + if spec.associate_public_ip_address: + params[key] = 'true' + else: + params[key] = 'false' + + +class NetworkInterfaceSpecification(object): + def __init__(self, network_interface_id=None, device_index=None, + subnet_id=None, description=None, private_ip_address=None, + groups=None, delete_on_termination=None, + private_ip_addresses=None, + secondary_private_ip_address_count=None, + associate_public_ip_address=None): + self.network_interface_id = network_interface_id + self.device_index = device_index + self.subnet_id = subnet_id + self.description = description + self.private_ip_address = private_ip_address + self.groups = groups + self.delete_on_termination = delete_on_termination + self.private_ip_addresses = private_ip_addresses + self.secondary_private_ip_address_count = \ + secondary_private_ip_address_count + self.associate_public_ip_address = associate_public_ip_address diff --git a/ext/boto/ec2/placementgroup.py b/ext/boto/ec2/placementgroup.py new file mode 100644 index 0000000000..0c2596616d --- /dev/null +++ b/ext/boto/ec2/placementgroup.py @@ -0,0 +1,53 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +Represents an EC2 Placement Group +""" +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + + +class PlacementGroup(EC2Object): + + def __init__(self, connection=None, name=None, strategy=None, state=None): + super(PlacementGroup, self).__init__(connection) + self.name = name + self.strategy = strategy + self.state = state + + def __repr__(self): + return 'PlacementGroup:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'groupName': + self.name = value + elif name == 'strategy': + self.strategy = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_placement_group( + self.name, + dry_run=dry_run + ) diff --git a/ext/boto/ec2/regioninfo.py b/ext/boto/ec2/regioninfo.py new file mode 100644 index 0000000000..21a56fb927 --- /dev/null +++ b/ext/boto/ec2/regioninfo.py @@ -0,0 +1,36 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.regioninfo import RegionInfo + + +class EC2RegionInfo(RegionInfo): + """ + Represents an EC2 Region + """ + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.ec2.connection import EC2Connection + super(EC2RegionInfo, self).__init__(connection, name, endpoint, + EC2Connection) diff --git a/ext/boto/ec2/reservedinstance.py b/ext/boto/ec2/reservedinstance.py new file mode 100644 index 0000000000..5ccc008e1b --- /dev/null +++ b/ext/boto/ec2/reservedinstance.py @@ -0,0 +1,352 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
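As an illustration (not part of the vendored sources), a hedged sketch of how NetworkInterfaceSpecification and NetworkInterfaceCollection, defined earlier in this patch, are typically passed to run_instances. All IDs are placeholders, and associate_public_ip_address is only accepted on a single interface at device index 0, as enforced by build_list_params above.

import boto.ec2
from boto.ec2.networkinterface import (NetworkInterfaceCollection,
                                       NetworkInterfaceSpecification)

conn = boto.ec2.connect_to_region('us-east-1')   # placeholder region
eth0 = NetworkInterfaceSpecification(
    device_index=0,
    subnet_id='subnet-00000000',                 # placeholder subnet
    groups=['sg-00000000'],                      # placeholder security group
    associate_public_ip_address=True)
conn.run_instances('ami-00000000',               # placeholder AMI
                   instance_type='t1.micro',
                   network_interfaces=NetworkInterfaceCollection(eth0))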
+from boto.resultset import ResultSet +from boto.ec2.ec2object import EC2Object +from boto.utils import parse_ts + + +class ReservedInstancesOffering(EC2Object): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None, instance_tenancy=None, + currency_code=None, offering_type=None, + recurring_charges=None, pricing_details=None): + super(ReservedInstancesOffering, self).__init__(connection) + self.id = id + self.instance_type = instance_type + self.availability_zone = availability_zone + self.duration = duration + self.fixed_price = fixed_price + self.usage_price = usage_price + self.description = description + self.instance_tenancy = instance_tenancy + self.currency_code = currency_code + self.offering_type = offering_type + self.recurring_charges = recurring_charges + self.pricing_details = pricing_details + + def __repr__(self): + return 'ReservedInstanceOffering:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'recurringCharges': + self.recurring_charges = ResultSet([('item', RecurringCharge)]) + return self.recurring_charges + elif name == 'pricingDetailsSet': + self.pricing_details = ResultSet([('item', PricingDetail)]) + return self.pricing_details + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesOfferingId': + self.id = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'duration': + self.duration = int(value) + elif name == 'fixedPrice': + self.fixed_price = value + elif name == 'usagePrice': + self.usage_price = value + elif name == 'productDescription': + self.description = value + elif name == 'instanceTenancy': + self.instance_tenancy = value + elif name == 'currencyCode': + self.currency_code = value + elif name == 'offeringType': + self.offering_type = value + elif name == 'marketplace': + self.marketplace = True if value == 'true' else False + + def describe(self): + print('ID=%s' % self.id) + print('\tInstance Type=%s' % self.instance_type) + print('\tZone=%s' % self.availability_zone) + print('\tDuration=%s' % self.duration) + print('\tFixed Price=%s' % self.fixed_price) + print('\tUsage Price=%s' % self.usage_price) + print('\tDescription=%s' % self.description) + + def purchase(self, instance_count=1, dry_run=False): + return self.connection.purchase_reserved_instance_offering( + self.id, + instance_count, + dry_run=dry_run + ) + + +class RecurringCharge(object): + def __init__(self, connection=None, frequency=None, amount=None): + self.frequency = frequency + self.amount = amount + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class PricingDetail(object): + def __init__(self, connection=None, price=None, count=None): + self.price = price + self.count = count + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + +class ReservedInstance(ReservedInstancesOffering): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None, + instance_count=None, state=None): + super(ReservedInstance, self).__init__(connection, id, instance_type, + availability_zone, duration, + fixed_price, usage_price, + 
description) + self.instance_count = instance_count + self.state = state + self.start = None + self.end = None + + def __repr__(self): + return 'ReservedInstance:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'reservedInstancesId': + self.id = value + if name == 'instanceCount': + self.instance_count = int(value) + elif name == 'state': + self.state = value + elif name == 'start': + self.start = value + elif name == 'end': + self.end = value + else: + super(ReservedInstance, self).endElement(name, value, connection) + + +class ReservedInstanceListing(EC2Object): + def __init__(self, connection=None, listing_id=None, id=None, + create_date=None, update_date=None, + status=None, status_message=None, client_token=None): + self.connection = connection + self.listing_id = listing_id + self.id = id + self.create_date = create_date + self.update_date = update_date + self.status = status + self.status_message = status_message + self.client_token = client_token + + def startElement(self, name, attrs, connection): + if name == 'instanceCounts': + self.instance_counts = ResultSet([('item', InstanceCount)]) + return self.instance_counts + elif name == 'priceSchedules': + self.price_schedules = ResultSet([('item', PriceSchedule)]) + return self.price_schedules + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesListingId': + self.listing_id = value + elif name == 'reservedInstancesId': + self.id = value + elif name == 'createDate': + self.create_date = value + elif name == 'updateDate': + self.update_date = value + elif name == 'status': + self.status = value + elif name == 'statusMessage': + self.status_message = value + else: + setattr(self, name, value) + + +class InstanceCount(object): + def __init__(self, connection=None, state=None, instance_count=None): + self.state = state + self.instance_count = instance_count + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'state': + self.state = value + elif name == 'instanceCount': + self.instance_count = int(value) + else: + setattr(self, name, value) + + +class PriceSchedule(object): + def __init__(self, connection=None, term=None, price=None, + currency_code=None, active=None): + self.connection = connection + self.term = term + self.price = price + self.currency_code = currency_code + self.active = active + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'term': + self.term = int(value) + elif name == 'price': + self.price = value + elif name == 'currencyCode': + self.currency_code = value + elif name == 'active': + self.active = True if value == 'true' else False + else: + setattr(self, name, value) + + +class ReservedInstancesConfiguration(object): + def __init__(self, connection=None, availability_zone=None, platform=None, + instance_count=None, instance_type=None): + self.connection = connection + self.availability_zone = availability_zone + self.platform = platform + self.instance_count = instance_count + self.instance_type = instance_type + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'availabilityZone': + self.availability_zone = value + elif name == 'platform': + self.platform = value + elif name == 'instanceCount': + self.instance_count = int(value) + elif name == 'instanceType': + self.instance_type = value + else: + 
setattr(self, name, value) + + +class ModifyReservedInstancesResult(object): + def __init__(self, connection=None, modification_id=None): + self.connection = connection + self.modification_id = modification_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + else: + setattr(self, name, value) + + +class ModificationResult(object): + def __init__(self, connection=None, modification_id=None, + availability_zone=None, platform=None, instance_count=None, + instance_type=None): + self.connection = connection + self.modification_id = modification_id + self.availability_zone = availability_zone + self.platform = platform + self.instance_count = instance_count + self.instance_type = instance_type + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'platform': + self.platform = value + elif name == 'instanceCount': + self.instance_count = int(value) + elif name == 'instanceType': + self.instance_type = value + else: + setattr(self, name, value) + + +class ReservedInstancesModification(object): + def __init__(self, connection=None, modification_id=None, + reserved_instances=None, modification_results=None, + create_date=None, update_date=None, effective_date=None, + status=None, status_message=None, client_token=None): + self.connection = connection + self.modification_id = modification_id + self.reserved_instances = reserved_instances + self.modification_results = modification_results + self.create_date = create_date + self.update_date = update_date + self.effective_date = effective_date + self.status = status + self.status_message = status_message + self.client_token = client_token + + def startElement(self, name, attrs, connection): + if name == 'reservedInstancesSet': + self.reserved_instances = ResultSet([ + ('item', ReservedInstance) + ]) + return self.reserved_instances + elif name == 'modificationResultSet': + self.modification_results = ResultSet([ + ('item', ModificationResult) + ]) + return self.modification_results + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesModificationId': + self.modification_id = value + elif name == 'createDate': + self.create_date = parse_ts(value) + elif name == 'updateDate': + self.update_date = parse_ts(value) + elif name == 'effectiveDate': + self.effective_date = parse_ts(value) + elif name == 'status': + self.status = value + elif name == 'statusMessage': + self.status_message = value + elif name == 'clientToken': + self.client_token = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/securitygroup.py b/ext/boto/ec2/securitygroup.py new file mode 100644 index 0000000000..dec49cb278 --- /dev/null +++ b/ext/boto/ec2/securitygroup.py @@ -0,0 +1,392 @@ +# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Security Group
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.exception import BotoClientError
+
+
+class SecurityGroup(TaggedEC2Object):
+
+    def __init__(self, connection=None, owner_id=None,
+                 name=None, description=None, id=None):
+        super(SecurityGroup, self).__init__(connection)
+        self.id = id
+        self.owner_id = owner_id
+        self.name = name
+        self.description = description
+        self.vpc_id = None
+        self.rules = IPPermissionsList()
+        self.rules_egress = IPPermissionsList()
+
+    def __repr__(self):
+        return 'SecurityGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        retval = super(SecurityGroup, self).startElement(name, attrs, connection)
+        if retval is not None:
+            return retval
+        if name == 'ipPermissions':
+            return self.rules
+        elif name == 'ipPermissionsEgress':
+            return self.rules_egress
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ownerId':
+            self.owner_id = value
+        elif name == 'groupId':
+            self.id = value
+        elif name == 'groupName':
+            self.name = value
+        elif name == 'vpcId':
+            self.vpc_id = value
+        elif name == 'groupDescription':
+            self.description = value
+        elif name == 'ipRanges':
+            pass
+        elif name == 'return':
+            if value == 'false':
+                self.status = False
+            elif value == 'true':
+                self.status = True
+            else:
+                raise Exception(
+                    'Unexpected value of status %s for group %s' % (
+                        value,
+                        self.name
+                    )
+                )
+        else:
+            setattr(self, name, value)
+
+    def delete(self, dry_run=False):
+        if self.vpc_id:
+            return self.connection.delete_security_group(
+                group_id=self.id,
+                dry_run=dry_run
+            )
+        else:
+            return self.connection.delete_security_group(
+                self.name,
+                dry_run=dry_run
+            )
+
+    def add_rule(self, ip_protocol, from_port, to_port,
+                 src_group_name, src_group_owner_id, cidr_ip,
+                 src_group_group_id, dry_run=False):
+        """
+        Add a rule to the SecurityGroup object.  Note that this method
+        only changes the local version of the object.  No information
+        is sent to EC2.
+        """
+        rule = IPPermissions(self)
+        rule.ip_protocol = ip_protocol
+        rule.from_port = from_port
+        rule.to_port = to_port
+        self.rules.append(rule)
+        rule.add_grant(
+            src_group_name,
+            src_group_owner_id,
+            cidr_ip,
+            src_group_group_id,
+            dry_run=dry_run
+        )
+
+    def remove_rule(self, ip_protocol, from_port, to_port,
+                    src_group_name, src_group_owner_id, cidr_ip,
+                    src_group_group_id, dry_run=False):
+        """
+        Remove a rule from the SecurityGroup object.
Note that this method + only changes the local version of the object. No information + is sent to EC2. + """ + if not self.rules: + raise ValueError("The security group has no rules") + + target_rule = None + for rule in self.rules: + if rule.ip_protocol == ip_protocol: + if rule.from_port == from_port: + if rule.to_port == to_port: + target_rule = rule + target_grant = None + for grant in rule.grants: + if grant.name == src_group_name or grant.group_id == src_group_group_id: + if grant.owner_id == src_group_owner_id: + if grant.cidr_ip == cidr_ip: + target_grant = grant + if target_grant: + rule.grants.remove(target_grant) + if len(rule.grants) == 0: + self.rules.remove(target_rule) + + def authorize(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None, dry_run=False): + """ + Add a new rule to this security group. + You need to pass in either src_group_name + OR ip_protocol, from_port, to_port, + and cidr_ip. In other words, either you are authorizing another + group or you are authorizing some ip-based rule. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type cidr_ip: string or list of strings + :param cidr_ip: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or + :class:`boto.ec2.securitygroup.GroupOrCIDR` + :param src_group: The Security Group you are granting access to. + + :rtype: bool + :return: True if successful. + """ + group_name = None + if not self.vpc_id: + group_name = self.name + group_id = None + if self.vpc_id: + group_id = self.id + src_group_name = None + src_group_owner_id = None + src_group_group_id = None + if src_group: + cidr_ip = None + src_group_owner_id = src_group.owner_id + if not self.vpc_id: + src_group_name = src_group.name + else: + if hasattr(src_group, 'group_id'): + src_group_group_id = src_group.group_id + else: + src_group_group_id = src_group.id + status = self.connection.authorize_security_group(group_name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip, + group_id, + src_group_group_id, + dry_run=dry_run) + if status: + if not isinstance(cidr_ip, list): + cidr_ip = [cidr_ip] + for single_cidr_ip in cidr_ip: + self.add_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, single_cidr_ip, + src_group_group_id, dry_run=dry_run) + return status + + def revoke(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None, dry_run=False): + group_name = None + if not self.vpc_id: + group_name = self.name + group_id = None + if self.vpc_id: + group_id = self.id + src_group_name = None + src_group_owner_id = None + src_group_group_id = None + if src_group: + cidr_ip = None + src_group_owner_id = src_group.owner_id + if not self.vpc_id: + src_group_name = src_group.name + else: + if hasattr(src_group, 'group_id'): + src_group_group_id = src_group.group_id + else: + src_group_group_id = src_group.id + status = self.connection.revoke_security_group(group_name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip, + group_id, + src_group_group_id, + dry_run=dry_run) + if status: + self.remove_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, cidr_ip, 
src_group_group_id, + dry_run=dry_run) + return status + + def copy_to_region(self, region, name=None, dry_run=False): + """ + Create a copy of this security group in another region. + Note that the new security group will be a separate entity + and will not stay in sync automatically after the copy + operation. + + :type region: :class:`boto.ec2.regioninfo.RegionInfo` + :param region: The region to which this security group will be copied. + + :type name: string + :param name: The name of the copy. If not supplied, the copy + will have the same name as this security group. + + :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` + :return: The new security group. + """ + if region.name == self.region: + raise BotoClientError('Unable to copy to the same Region') + conn_params = self.connection.get_params() + rconn = region.connect(**conn_params) + sg = rconn.create_security_group( + name or self.name, + self.description, + dry_run=dry_run + ) + source_groups = [] + for rule in self.rules: + for grant in rule.grants: + grant_nom = grant.name or grant.group_id + if grant_nom: + if grant_nom not in source_groups: + source_groups.append(grant_nom) + sg.authorize(None, None, None, None, grant, + dry_run=dry_run) + else: + sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port, + grant.cidr_ip, dry_run=dry_run) + return sg + + def instances(self, dry_run=False): + """ + Find all of the current instances that are running within this + security group. + + :rtype: list of :class:`boto.ec2.instance.Instance` + :return: A list of Instance objects + """ + rs = [] + if self.vpc_id: + rs.extend(self.connection.get_all_reservations( + filters={'instance.group-id': self.id}, + dry_run=dry_run + )) + else: + rs.extend(self.connection.get_all_reservations( + filters={'group-id': self.id}, + dry_run=dry_run + )) + instances = [i for r in rs for i in r.instances] + return instances + + +class IPPermissionsList(list): + + def startElement(self, name, attrs, connection): + if name == 'item': + self.append(IPPermissions(self)) + return self[-1] + return None + + def endElement(self, name, value, connection): + pass + + +class IPPermissions(object): + + def __init__(self, parent=None): + self.parent = parent + self.ip_protocol = None + self.from_port = None + self.to_port = None + self.grants = [] + + def __repr__(self): + return 'IPPermissions:%s(%s-%s)' % (self.ip_protocol, + self.from_port, self.to_port) + + def startElement(self, name, attrs, connection): + if name == 'item': + self.grants.append(GroupOrCIDR(self)) + return self.grants[-1] + return None + + def endElement(self, name, value, connection): + if name == 'ipProtocol': + self.ip_protocol = value + elif name == 'fromPort': + self.from_port = value + elif name == 'toPort': + self.to_port = value + else: + setattr(self, name, value) + + def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None, + dry_run=False): + grant = GroupOrCIDR(self) + grant.owner_id = owner_id + grant.group_id = group_id + grant.name = name + grant.cidr_ip = cidr_ip + self.grants.append(grant) + return grant + + +class GroupOrCIDR(object): + + def __init__(self, parent=None): + self.owner_id = None + self.group_id = None + self.name = None + self.cidr_ip = None + + def __repr__(self): + if self.cidr_ip: + return '%s' % self.cidr_ip + else: + return '%s-%s' % (self.name or self.group_id, self.owner_id) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'userId': + self.owner_id 
= value
+        elif name == 'groupId':
+            self.group_id = value
+        elif name == 'groupName':
+            self.name = value
+        elif name == 'cidrIp':
+            self.cidr_ip = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/ec2/snapshot.py b/ext/boto/ec2/snapshot.py
new file mode 100644
index 0000000000..4db301f442
--- /dev/null
+++ b/ext/boto/ec2/snapshot.py
@@ -0,0 +1,202 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic Block Store Snapshot
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.ec2.zone import Zone
+
+
+class Snapshot(TaggedEC2Object):
+    """
+    Represents an EBS snapshot.
+    :ivar id: The unique ID of the snapshot.
+    :ivar volume_id: The ID of the volume this snapshot was created
+        from.
+    :ivar status: The status of the snapshot.
+    :ivar progress: The percent complete of the snapshot.
+    :ivar start_time: The timestamp of when the snapshot was created.
+    :ivar owner_id: The id of the account that owns the snapshot.
+    :ivar owner_alias: The alias of the account that owns the snapshot.
+    :ivar volume_size: The size (in GB) of the volume the snapshot was created from.
+    :ivar description: The description of the snapshot.
+ :ivar encrypted: True if this snapshot is encrypted + """ + + AttrName = 'createVolumePermission' + + def __init__(self, connection=None): + super(Snapshot, self).__init__(connection) + self.id = None + self.volume_id = None + self.status = None + self.progress = None + self.start_time = None + self.owner_id = None + self.owner_alias = None + self.volume_size = None + self.description = None + self.encrypted = None + + def __repr__(self): + return 'Snapshot:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'snapshotId': + self.id = value + elif name == 'volumeId': + self.volume_id = value + elif name == 'status': + self.status = value + elif name == 'startTime': + self.start_time = value + elif name == 'ownerId': + self.owner_id = value + elif name == 'ownerAlias': + self.owner_alias = value + elif name == 'volumeSize': + try: + self.volume_size = int(value) + except: + self.volume_size = value + elif name == 'description': + self.description = value + elif name == 'encrypted': + self.encrypted = (value.lower() == 'true') + else: + setattr(self, name, value) + + def _update(self, updated): + self.progress = updated.progress + self.status = updated.status + + def update(self, validate=False, dry_run=False): + """ + Update the data associated with this snapshot by querying EC2. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + snapshot the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_snapshots([self.id], dry_run=dry_run) + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid Snapshot ID' % self.id) + return self.progress + + def delete(self, dry_run=False): + return self.connection.delete_snapshot(self.id, dry_run=dry_run) + + def get_permissions(self, dry_run=False): + attrs = self.connection.get_snapshot_attribute( + self.id, + self.AttrName, + dry_run=dry_run + ) + return attrs.attrs + + def share(self, user_ids=None, groups=None, dry_run=False): + return self.connection.modify_snapshot_attribute(self.id, + self.AttrName, + 'add', + user_ids, + groups, + dry_run=dry_run) + + def unshare(self, user_ids=None, groups=None, dry_run=False): + return self.connection.modify_snapshot_attribute(self.id, + self.AttrName, + 'remove', + user_ids, + groups, + dry_run=dry_run) + + def reset_permissions(self, dry_run=False): + return self.connection.reset_snapshot_attribute( + self.id, + self.AttrName, + dry_run=dry_run + ) + + def create_volume(self, zone, size=None, volume_type=None, iops=None, + dry_run=False): + """ + Create a new EBS Volume from this Snapshot + + :type zone: string or :class:`boto.ec2.zone.Zone` + :param zone: The availability zone in which the Volume will be created. + + :type size: int + :param size: The size of the new volume, in GiB. (optional). Defaults to + the size of the snapshot. + + :type volume_type: string + :param volume_type: The type of the volume. (optional). Valid + values are: standard | io1 | gp2. + + :type iops: int + :param iops: The provisioned IOPs you want to associate with + this volume. 
(optional) + """ + if isinstance(zone, Zone): + zone = zone.name + return self.connection.create_volume( + size, + zone, + self.id, + volume_type, + iops, + self.encrypted, + dry_run=dry_run + ) + + +class SnapshotAttribute(object): + def __init__(self, parent=None): + self.snapshot_id = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'createVolumePermission': + self.name = 'create_volume_permission' + elif name == 'group': + if 'groups' in self.attrs: + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if 'user_ids' in self.attrs: + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'snapshotId': + self.snapshot_id = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/spotdatafeedsubscription.py b/ext/boto/ec2/spotdatafeedsubscription.py new file mode 100644 index 0000000000..d0e0be8d3c --- /dev/null +++ b/ext/boto/ec2/spotdatafeedsubscription.py @@ -0,0 +1,65 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
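+
+# Illustrative usage sketch for this module: the bucket name and prefix
+# below are placeholders, and a configured EC2 connection is assumed.
+#
+#     import boto.ec2
+#     conn = boto.ec2.connect_to_region('us-east-1')
+#     sub = conn.create_spot_datafeed_subscription('my-log-bucket', 'spot/')
+#     print(sub.bucket, sub.state)
+#     sub.delete()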
+ +""" +Represents an EC2 Spot Instance Datafeed Subscription +""" +from boto.ec2.ec2object import EC2Object +from boto.ec2.spotinstancerequest import SpotInstanceStateFault + + +class SpotDatafeedSubscription(EC2Object): + + def __init__(self, connection=None, owner_id=None, + bucket=None, prefix=None, state=None, fault=None): + super(SpotDatafeedSubscription, self).__init__(connection) + self.owner_id = owner_id + self.bucket = bucket + self.prefix = prefix + self.state = state + self.fault = fault + + def __repr__(self): + return 'SpotDatafeedSubscription:%s' % self.bucket + + def startElement(self, name, attrs, connection): + if name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + else: + return None + + def endElement(self, name, value, connection): + if name == 'ownerId': + self.owner_id = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_spot_datafeed_subscription( + dry_run=dry_run + ) diff --git a/ext/boto/ec2/spotinstancerequest.py b/ext/boto/ec2/spotinstancerequest.py new file mode 100644 index 0000000000..da087fefdb --- /dev/null +++ b/ext/boto/ec2/spotinstancerequest.py @@ -0,0 +1,192 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Spot Instance Request +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.ec2.launchspecification import LaunchSpecification + + +class SpotInstanceStateFault(object): + """ + The fault codes for the Spot Instance request, if any. + + :ivar code: The reason code for the Spot Instance state change. + :ivar message: The message for the Spot Instance state change. + """ + + def __init__(self, code=None, message=None): + self.code = code + self.message = message + + def __repr__(self): + return '(%s, %s)' % (self.code, self.message) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'message': + self.message = value + setattr(self, name, value) + + +class SpotInstanceStatus(object): + """ + Contains the status of a Spot Instance Request. + + :ivar code: Status code of the request. + :ivar message: The description for the status code for the Spot request. 
+    :ivar update_time: The time the status was last updated.
+    """
+
+    def __init__(self, code=None, update_time=None, message=None):
+        self.code = code
+        self.update_time = update_time
+        self.message = message
+
+    def __repr__(self):
+        return '<Status: %s>' % self.code
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'code':
+            self.code = value
+        elif name == 'message':
+            self.message = value
+        elif name == 'updateTime':
+            self.update_time = value
+
+
+class SpotInstanceRequest(TaggedEC2Object):
+    """
+
+    :ivar id: The ID of the Spot Instance Request.
+    :ivar price: The maximum hourly price for any Spot Instance launched to
+        fulfill the request.
+    :ivar type: The Spot Instance request type.
+    :ivar state: The state of the Spot Instance request.
+    :ivar fault: The fault codes for the Spot Instance request, if any.
+    :ivar valid_from: The start date of the request. If this is a one-time
+        request, the request becomes active at this date and time and remains
+        active until all instances launch, the request expires, or the request is
+        canceled. If the request is persistent, the request becomes active at this
+        date and time and remains active until it expires or is canceled.
+    :ivar valid_until: The end date of the request. If this is a one-time
+        request, the request remains active until all instances launch, the request
+        is canceled, or this date is reached. If the request is persistent, it
+        remains active until it is canceled or this date is reached.
+    :ivar launch_group: The instance launch group. Launch groups are Spot
+        Instances that launch together and terminate together.
+    :ivar launched_availability_zone: The Availability Zone in which the
+        request is launched.
+    :ivar product_description: The product description associated with the
+        Spot Instance.
+    :ivar availability_zone_group: The Availability Zone group. If you specify
+        the same Availability Zone group for all Spot Instance requests, all Spot
+        Instances are launched in the same Availability Zone.
+    :ivar create_time: The time stamp when the Spot Instance request was
+        created.
+    :ivar launch_specification: Additional information for launching instances.
+    :ivar instance_id: The instance ID, if an instance has been launched to
+        fulfill the Spot Instance request.
+    :ivar status: The status code and status message describing the Spot
+        Instance request.
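+
+    Example sketch (illustrative only; the price and AMI ID are
+    placeholders, and a configured EC2 connection is assumed)::
+
+        import boto.ec2
+        conn = boto.ec2.connect_to_region('us-east-1')
+        requests = conn.request_spot_instances(
+            price='0.05', image_id='ami-12345678',
+            instance_type='m1.small')
+        print(requests[0].id, requests[0].state)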
+ + """ + + def __init__(self, connection=None): + super(SpotInstanceRequest, self).__init__(connection) + self.id = None + self.price = None + self.type = None + self.state = None + self.fault = None + self.valid_from = None + self.valid_until = None + self.launch_group = None + self.launched_availability_zone = None + self.product_description = None + self.availability_zone_group = None + self.create_time = None + self.launch_specification = None + self.instance_id = None + self.status = None + + def __repr__(self): + return 'SpotInstanceRequest:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(SpotInstanceRequest, self).startElement(name, attrs, + connection) + if retval is not None: + return retval + if name == 'launchSpecification': + self.launch_specification = LaunchSpecification(connection) + return self.launch_specification + elif name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + elif name == 'status': + self.status = SpotInstanceStatus() + return self.status + else: + return None + + def endElement(self, name, value, connection): + if name == 'spotInstanceRequestId': + self.id = value + elif name == 'spotPrice': + self.price = float(value) + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'validFrom': + self.valid_from = value + elif name == 'validUntil': + self.valid_until = value + elif name == 'launchGroup': + self.launch_group = value + elif name == 'availabilityZoneGroup': + self.availability_zone_group = value + elif name == 'launchedAvailabilityZone': + self.launched_availability_zone = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'createTime': + self.create_time = value + elif name == 'productDescription': + self.product_description = value + else: + setattr(self, name, value) + + def cancel(self, dry_run=False): + self.connection.cancel_spot_instance_requests( + [self.id], + dry_run=dry_run + ) diff --git a/ext/boto/ec2/spotpricehistory.py b/ext/boto/ec2/spotpricehistory.py new file mode 100644 index 0000000000..ac125de683 --- /dev/null +++ b/ext/boto/ec2/spotpricehistory.py @@ -0,0 +1,54 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
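+
+# Illustrative usage sketch: fetches recent Spot price history (a
+# configured EC2 connection is assumed; the arguments are examples only).
+#
+#     import boto.ec2
+#     conn = boto.ec2.connect_to_region('us-east-1')
+#     for entry in conn.get_spot_price_history(instance_type='m1.small',
+#                                              product_description='Linux/UNIX'):
+#         print(entry.timestamp, entry.price)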
+
+"""
+Represents an EC2 Spot Price History record
+"""
+
+from boto.ec2.ec2object import EC2Object
+
+
+class SpotPriceHistory(EC2Object):
+
+    def __init__(self, connection=None):
+        super(SpotPriceHistory, self).__init__(connection)
+        self.price = 0.0
+        self.instance_type = None
+        self.product_description = None
+        self.timestamp = None
+        self.availability_zone = None
+
+    def __repr__(self):
+        return 'SpotPriceHistory(%s):%.2f' % (self.instance_type, self.price)
+
+    def endElement(self, name, value, connection):
+        if name == 'instanceType':
+            self.instance_type = value
+        elif name == 'spotPrice':
+            self.price = float(value)
+        elif name == 'productDescription':
+            self.product_description = value
+        elif name == 'timestamp':
+            self.timestamp = value
+        elif name == 'availabilityZone':
+            self.availability_zone = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/ec2/tag.py b/ext/boto/ec2/tag.py
new file mode 100644
index 0000000000..deb2c78800
--- /dev/null
+++ b/ext/boto/ec2/tag.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class TagSet(dict):
+    """
+    A TagSet is used to collect the tags associated with a particular
+    EC2 resource. Not all resources can be tagged but for those that
+    can, this dict object will be used to collect those values. See
+    :class:`boto.ec2.ec2object.TaggedEC2Object` for more details.
+    """
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self._current_key = None
+        self._current_value = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'item':
+            self._current_key = None
+            self._current_value = None
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'key':
+            self._current_key = value
+        elif name == 'value':
+            self._current_value = value
+        elif name == 'item':
+            self[self._current_key] = self._current_value
+
+
+class Tag(object):
+    """
+    A Tag is used when creating or listing all tags related to
+    an AWS account. It records not only the key and value but
+    also the ID of the resource to which the tag is attached
+    as well as the type of the resource.
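+
+    Example sketch (the instance ID is a placeholder and a configured
+    EC2 connection is assumed)::
+
+        import boto.ec2
+        conn = boto.ec2.connect_to_region('us-east-1')
+        conn.create_tags(['i-12345678'], {'Name': 'web-server'})
+        for tag in conn.get_all_tags(filters={'resource-id': 'i-12345678'}):
+            print(tag.res_id, tag.name, tag.value)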
+ """ + + def __init__(self, connection=None, res_id=None, res_type=None, + name=None, value=None): + self.connection = connection + self.res_id = res_id + self.res_type = res_type + self.name = name + self.value = value + + def __repr__(self): + return 'Tag:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'resourceId': + self.res_id = value + elif name == 'resourceType': + self.res_type = value + elif name == 'key': + self.name = value + elif name == 'value': + self.value = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/volume.py b/ext/boto/ec2/volume.py new file mode 100644 index 0000000000..c40062b37c --- /dev/null +++ b/ext/boto/ec2/volume.py @@ -0,0 +1,315 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Elastic Block Storage Volume +""" +from boto.resultset import ResultSet +from boto.ec2.tag import Tag +from boto.ec2.ec2object import TaggedEC2Object + + +class Volume(TaggedEC2Object): + """ + Represents an EBS volume. + + :ivar id: The unique ID of the volume. + :ivar create_time: The timestamp of when the volume was created. + :ivar status: The status of the volume. + :ivar size: The size (in GB) of the volume. + :ivar snapshot_id: The ID of the snapshot this volume was created + from, if applicable. + :ivar attach_data: An AttachmentSet object. + :ivar zone: The availability zone this volume is in. + :ivar type: The type of volume (standard or consistent-iops) + :ivar iops: If this volume is of type consistent-iops, this is + the number of IOPS provisioned (10-300). + :ivar encrypted: True if this volume is encrypted. 
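+
+    Example sketch (the zone, size and instance ID are placeholders;
+    a configured EC2 connection is assumed)::
+
+        import boto.ec2
+        conn = boto.ec2.connect_to_region('us-east-1')
+        vol = conn.create_volume(10, 'us-east-1a')
+        vol.attach('i-12345678', '/dev/sdh')
+        vol.update()
+        print(vol.status, vol.attachment_state())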
+ """ + + def __init__(self, connection=None): + super(Volume, self).__init__(connection) + self.id = None + self.create_time = None + self.status = None + self.size = None + self.snapshot_id = None + self.attach_data = None + self.zone = None + self.type = None + self.iops = None + self.encrypted = None + + def __repr__(self): + return 'Volume:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(Volume, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'attachmentSet': + self.attach_data = AttachmentSet() + return self.attach_data + elif name == 'tagSet': + self.tags = ResultSet([('item', Tag)]) + return self.tags + else: + return None + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'createTime': + self.create_time = value + elif name == 'status': + if value != '': + self.status = value + elif name == 'size': + self.size = int(value) + elif name == 'snapshotId': + self.snapshot_id = value + elif name == 'availabilityZone': + self.zone = value + elif name == 'volumeType': + self.type = value + elif name == 'iops': + self.iops = int(value) + elif name == 'encrypted': + self.encrypted = (value.lower() == 'true') + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self, validate=False, dry_run=False): + """ + Update the data associated with this volume by querying EC2. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + volume the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + # Check the resultset since Eucalyptus ignores the volumeId param + unfiltered_rs = self.connection.get_all_volumes( + [self.id], + dry_run=dry_run + ) + rs = [x for x in unfiltered_rs if x.id == self.id] + if len(rs) > 0: + self._update(rs[0]) + elif validate: + raise ValueError('%s is not a valid Volume ID' % self.id) + return self.status + + def delete(self, dry_run=False): + """ + Delete this EBS volume. + + :rtype: bool + :return: True if successful + """ + return self.connection.delete_volume(self.id, dry_run=dry_run) + + def attach(self, instance_id, device, dry_run=False): + """ + Attach this EBS volume to an EC2 instance. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device: str + :param device: The device on the instance through which the + volume will be exposed (e.g. /dev/sdh) + + :rtype: bool + :return: True if successful + """ + return self.connection.attach_volume( + self.id, + instance_id, + device, + dry_run=dry_run + ) + + def detach(self, force=False, dry_run=False): + """ + Detach this EBS volume from an EC2 instance. + + :type force: bool + :param force: Forces detachment if the previous detachment + attempt did not occur cleanly. This option can lead to + data loss or a corrupted file system. Use this option only + as a last resort to detach a volume from a failed + instance. The instance will not have an opportunity to + flush file system caches nor file system meta data. If you + use this option, you must perform file system check and + repair procedures. 
+ + :rtype: bool + :return: True if successful + """ + instance_id = None + if self.attach_data: + instance_id = self.attach_data.instance_id + device = None + if self.attach_data: + device = self.attach_data.device + return self.connection.detach_volume( + self.id, + instance_id, + device, + force, + dry_run=dry_run + ) + + def create_snapshot(self, description=None, dry_run=False): + """ + Create a snapshot of this EBS Volume. + + :type description: str + :param description: A description of the snapshot. + Limited to 256 characters. + + :rtype: :class:`boto.ec2.snapshot.Snapshot` + :return: The created Snapshot object + """ + return self.connection.create_snapshot( + self.id, + description, + dry_run=dry_run + ) + + def volume_state(self): + """ + Returns the state of the volume. Same value as the status attribute. + """ + return self.status + + def attachment_state(self): + """ + Get the attachment state. + """ + state = None + if self.attach_data: + state = self.attach_data.status + return state + + def snapshots(self, owner=None, restorable_by=None, dry_run=False): + """ + Get all snapshots related to this volume. Note that this requires + that all available snapshots for the account be retrieved from EC2 + first and then the list is filtered client-side to contain only + those for this volume. + + :type owner: str + :param owner: If present, only the snapshots owned by the + specified user will be returned. Valid values are: + + * self + * amazon + * AWS Account ID + + :type restorable_by: str + :param restorable_by: If present, only the snapshots that + are restorable by the specified account id will be returned. + + :rtype: list of L{boto.ec2.snapshot.Snapshot} + :return: The requested Snapshot objects + + """ + rs = self.connection.get_all_snapshots( + owner=owner, + restorable_by=restorable_by, + dry_run=dry_run + ) + mine = [] + for snap in rs: + if snap.volume_id == self.id: + mine.append(snap) + return mine + + +class AttachmentSet(object): + """ + Represents an EBS attachmentset. + + :ivar id: The unique ID of the volume. 
+ :ivar instance_id: The unique ID of the attached instance + :ivar status: The status of the attachment + :ivar attach_time: Attached since + :ivar device: The device the instance has mapped + """ + def __init__(self): + self.id = None + self.instance_id = None + self.status = None + self.attach_time = None + self.device = None + + def __repr__(self): + return 'AttachmentSet:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'device': + self.device = value + else: + setattr(self, name, value) + + +class VolumeAttribute(object): + def __init__(self, parent=None): + self.id = None + self._key_name = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + if name == 'autoEnableIO': + self._key_name = name + return None + + def endElement(self, name, value, connection): + if name == 'value': + if value.lower() == 'true': + self.attrs[self._key_name] = True + else: + self.attrs[self._key_name] = False + elif name == 'volumeId': + self.id = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2/volumestatus.py b/ext/boto/ec2/volumestatus.py new file mode 100644 index 0000000000..78de2bb04f --- /dev/null +++ b/ext/boto/ec2/volumestatus.py @@ -0,0 +1,205 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.instancestatus import Status, Details + + +class Event(object): + """ + A status event for an instance. + + :ivar type: The type of the event. + :ivar id: The ID of the event. + :ivar description: A string describing the reason for the event. + :ivar not_before: A datestring describing the earliest time for + the event. + :ivar not_after: A datestring describing the latest time for + the event. 
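+
+    Example sketch of reading volume status events (a configured EC2
+    connection is assumed)::
+
+        import boto.ec2
+        conn = boto.ec2.connect_to_region('us-east-1')
+        for vstat in conn.get_all_volume_status():
+            for event in (vstat.events or []):
+                print(vstat.id, event.type, event.description)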
+    """
+
+    def __init__(self, type=None, id=None, description=None,
+                 not_before=None, not_after=None):
+        self.type = type
+        self.id = id
+        self.description = description
+        self.not_before = not_before
+        self.not_after = not_after
+
+    def __repr__(self):
+        return 'Event:%s' % self.type
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'eventType':
+            self.type = value
+        elif name == 'eventId':
+            self.id = value
+        elif name == 'description':
+            self.description = value
+        elif name == 'notBefore':
+            self.not_before = value
+        elif name == 'notAfter':
+            self.not_after = value
+        else:
+            setattr(self, name, value)
+
+
+class EventSet(list):
+
+    def startElement(self, name, attrs, connection):
+        if name == 'item':
+            event = Event()
+            self.append(event)
+            return event
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+
+class Action(object):
+    """
+    An action for a volume.
+
+    :ivar code: The code for the type of the action.
+    :ivar id: The ID of the event.
+    :ivar type: The type of the event.
+    :ivar description: A description of the action.
+    """
+
+    def __init__(self, code=None, id=None, description=None, type=None):
+        self.code = code
+        self.id = id
+        self.type = type
+        self.description = description
+
+    def __repr__(self):
+        return 'Action:%s' % self.code
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'eventType':
+            self.type = value
+        elif name == 'eventId':
+            self.id = value
+        elif name == 'description':
+            self.description = value
+        elif name == 'code':
+            self.code = value
+        else:
+            setattr(self, name, value)
+
+
+class ActionSet(list):
+
+    def startElement(self, name, attrs, connection):
+        if name == 'item':
+            action = Action()
+            self.append(action)
+            return action
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+
+class VolumeStatus(object):
+    """
+    Represents an EC2 Volume status as reported by
+    the DescribeVolumeStatus request.
+
+    :ivar id: The volume identifier.
+    :ivar zone: The availability zone of the volume.
+    :ivar volume_status: A Status object that reports impaired
+        functionality that arises from problems internal to the volume.
+    :ivar events: A list of events relevant to the volume.
+    :ivar actions: A list of actions relevant to the volume.
+    """
+
+    def __init__(self, id=None, zone=None):
+        self.id = id
+        self.zone = zone
+        self.volume_status = Status()
+        self.events = None
+        self.actions = None
+
+    def __repr__(self):
+        return 'VolumeStatus:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        if name == 'eventsSet':
+            self.events = EventSet()
+            return self.events
+        elif name == 'actionsSet':
+            self.actions = ActionSet()
+            return self.actions
+        elif name == 'volumeStatus':
+            return self.volume_status
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'volumeId':
+            self.id = value
+        elif name == 'availabilityZone':
+            self.zone = value
+        else:
+            setattr(self, name, value)
+
+
+class VolumeStatusSet(list):
+    """
+    A list object that contains the results of a call to
+    the DescribeVolumeStatus request. Each element of the
+    list will be a VolumeStatus object.
+ + :ivar next_token: If the response was truncated by + the EC2 service, the next_token attribute of the + object will contain the string that needs to be + passed in to the next request to retrieve the next + set of results. + """ + + def __init__(self, connection=None): + list.__init__(self) + self.connection = connection + self.next_token = None + + def startElement(self, name, attrs, connection): + if name == 'item': + status = VolumeStatus() + self.append(status) + return status + else: + return None + + def endElement(self, name, value, connection): + if name == 'NextToken': + self.next_token = value + setattr(self, name, value) diff --git a/ext/boto/ec2/zone.py b/ext/boto/ec2/zone.py new file mode 100644 index 0000000000..85ed10224b --- /dev/null +++ b/ext/boto/ec2/zone.py @@ -0,0 +1,78 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Availability Zone +""" +from boto.ec2.ec2object import EC2Object + + +class MessageSet(list): + """ + A list object that contains messages associated with + an availability zone. + """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'message': + self.append(value) + else: + setattr(self, name, value) + + +class Zone(EC2Object): + """ + Represents an Availability Zone. + + :ivar name: The name of the zone. + :ivar state: The current state of the zone. + :ivar region_name: The name of the region the zone is associated with. + :ivar messages: A list of messages related to the zone. + """ + + def __init__(self, connection=None): + super(Zone, self).__init__(connection) + self.name = None + self.state = None + self.region_name = None + self.messages = None + + def __repr__(self): + return 'Zone:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'messageSet': + self.messages = MessageSet() + return self.messages + return None + + def endElement(self, name, value, connection): + if name == 'zoneName': + self.name = value + elif name == 'zoneState': + self.state = value + elif name == 'regionName': + self.region_name = value + else: + setattr(self, name, value) diff --git a/ext/boto/ec2containerservice/__init__.py b/ext/boto/ec2containerservice/__init__.py new file mode 100644 index 0000000000..136082d101 --- /dev/null +++ b/ext/boto/ec2containerservice/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon EC2 Container Service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection + return get_regions('ec2containerservice', + connection_cls=EC2ContainerServiceConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection + return connect('ec2containerservice', region_name, + connection_cls=EC2ContainerServiceConnection, **kw_params) diff --git a/ext/boto/ec2containerservice/exceptions.py b/ext/boto/ec2containerservice/exceptions.py new file mode 100644 index 0000000000..4ad32aeaa9 --- /dev/null +++ b/ext/boto/ec2containerservice/exceptions.py @@ -0,0 +1,31 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class ServerException(BotoServerError): + pass + + +class ClientException(BotoServerError): + pass diff --git a/ext/boto/ec2containerservice/layer1.py b/ext/boto/ec2containerservice/layer1.py new file mode 100644 index 0000000000..4168bdd017 --- /dev/null +++ b/ext/boto/ec2containerservice/layer1.py @@ -0,0 +1,748 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.ec2containerservice import exceptions + + +class EC2ContainerServiceConnection(AWSQueryConnection): + """ + Amazon EC2 Container Service (Amazon ECS) is a highly scalable, + fast, container management service that makes it easy to run, + stop, and manage Docker containers on a cluster of Amazon EC2 + instances. Amazon ECS lets you launch and stop container-enabled + applications with simple API calls, allows you to get the state of + your cluster from a centralized service, and gives you access to + many familiar Amazon EC2 features like security groups, Amazon EBS + volumes, and IAM roles. + + You can use Amazon ECS to schedule the placement of containers + across your cluster based on your resource needs, isolation + policies, and availability requirements. Amazon EC2 Container + Service eliminates the need for you to operate your own cluster + management and configuration management systems or worry about + scaling your management infrastructure. + """ + APIVersion = "2014-11-13" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "ecs.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "ServerException": exceptions.ServerException, + "ClientException": exceptions.ClientException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(EC2ContainerServiceConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_cluster(self, cluster_name=None): + """ + Creates a new Amazon ECS cluster. By default, your account + will receive a `default` cluster when you launch your first + container instance. However, you can create your own cluster + with a unique name with the `CreateCluster` action. + + During the preview, each account is limited to two clusters. + + :type cluster_name: string + :param cluster_name: The name of your cluster. If you do not specify a + name for your cluster, you will create a cluster named `default`. 
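+
+        Example sketch (the cluster name is a placeholder; credentials
+        are assumed to be configured)::
+
+            from boto.ec2containerservice import connect_to_region
+            ecs = connect_to_region('us-east-1')
+            ecs.create_cluster(cluster_name='demo')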
+ + """ + params = {} + if cluster_name is not None: + params['clusterName'] = cluster_name + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster): + """ + Deletes the specified cluster. You must deregister all + container instances from this cluster before you may delete + it. You can list the container instances in a cluster with + ListContainerInstances and deregister them with + DeregisterContainerInstance. + + :type cluster: string + :param cluster: The cluster you want to delete. + + """ + params = {'cluster': cluster, } + return self._make_request( + action='DeleteCluster', + verb='POST', + path='/', params=params) + + def deregister_container_instance(self, container_instance, cluster=None, + force=None): + """ + Deregisters an Amazon ECS container instance from the + specified cluster. This instance will no longer be available + to run tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instance you want to + deregister. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance you want to + deregister. The ARN contains the `arn:aws:ecs` namespace, followed + by the region of the container instance, the AWS account ID of the + container instance owner, the `container-instance` namespace, and + then the container instance UUID. For example, arn:aws:ecs: region + : aws_account_id :container-instance/ container_instance_UUID . + + :type force: boolean + :param force: Force the deregistration of the container instance. You + can use the `force` parameter if you have several tasks running on + a container instance and you don't want to run `StopTask` for each + task before deregistering the container instance. + + """ + params = {'containerInstance': container_instance, } + if cluster is not None: + params['cluster'] = cluster + if force is not None: + params['force'] = str( + force).lower() + return self._make_request( + action='DeregisterContainerInstance', + verb='POST', + path='/', params=params) + + def deregister_task_definition(self, task_definition): + """ + Deregisters the specified task definition. You will no longer + be able to run tasks from this definition after + deregistration. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to deregister. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DeregisterTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_clusters(self, clusters=None): + """ + Describes one or more of your clusters. + + :type clusters: list + :param clusters: A space-separated list of cluster names or full + cluster Amazon Resource Name (ARN) entries. If you do not specify a + cluster, the default cluster is assumed. + + """ + params = {} + if clusters is not None: + self.build_list_params(params, + clusters, + 'clusters.member') + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_container_instances(self, container_instances, cluster=None): + """ + Describes Amazon EC2 Container Service container instances. 
+ Returns metadata about registered and remaining resources on + each container instance requested. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instances you want to + describe. If you do not specify a cluster, the default cluster is + assumed. + + :type container_instances: list + :param container_instances: A space-separated list of container + instance UUIDs or full Amazon Resource Name (ARN) entries. + + """ + params = {} + self.build_list_params(params, + container_instances, + 'containerInstances.member') + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='DescribeContainerInstances', + verb='POST', + path='/', params=params) + + def describe_task_definition(self, task_definition): + """ + Describes a task definition. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to describe. + + """ + params = {'taskDefinition': task_definition, } + return self._make_request( + action='DescribeTaskDefinition', + verb='POST', + path='/', params=params) + + def describe_tasks(self, tasks, cluster=None): + """ + Describes a specified task or tasks. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task you want to describe. If you do not + specify a cluster, the default cluster is assumed. + + :type tasks: list + :param tasks: A space-separated list of task UUIDs or full Amazon + Resource Name (ARN) entries. + + """ + params = {} + self.build_list_params(params, + tasks, + 'tasks.member') + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='DescribeTasks', + verb='POST', + path='/', params=params) + + def discover_poll_endpoint(self, container_instance=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Returns an endpoint for the Amazon EC2 Container Service agent + to poll for updates. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance. The ARN contains the + `arn:aws:ecs` namespace, followed by the region of the container + instance, the AWS account ID of the container instance owner, the + `container-instance` namespace, and then the container instance + UUID. For example, arn:aws:ecs: region : aws_account_id :container- + instance/ container_instance_UUID . + + """ + params = {} + if container_instance is not None: + params['containerInstance'] = container_instance + return self._make_request( + action='DiscoverPollEndpoint', + verb='POST', + path='/', params=params) + + def list_clusters(self, next_token=None, max_results=None): + """ + Returns a list of existing clusters. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListClusters` request where `maxResults` was used and + the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the + `nextToken` value. This value is `null` when there are no more + results to return. + + :type max_results: integer + :param max_results: The maximum number of cluster results returned by + `ListClusters` in paginated output. 
When this parameter is used, + `ListClusters` only returns `maxResults` results in a single page + along with a `nextToken` response element. The remaining results of + the initial request can be seen by sending another `ListClusters` + request with the returned `nextToken` value. This value can be + between 1 and 100. If this parameter is not used, then + `ListClusters` returns up to 100 results and a `nextToken` value if + applicable. + + """ + params = {} + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListClusters', + verb='POST', + path='/', params=params) + + def list_container_instances(self, cluster=None, next_token=None, + max_results=None): + """ + Returns a list of container instances in a specified cluster. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container instances you want to list. If + you do not specify a cluster, the default cluster is assumed.. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListContainerInstances` request where `maxResults` was + used and the results exceeded the value of that parameter. + Pagination continues from the end of the previous results that + returned the `nextToken` value. This value is `null` when there are + no more results to return. + + :type max_results: integer + :param max_results: The maximum number of container instance results + returned by `ListContainerInstances` in paginated output. When this + parameter is used, `ListContainerInstances` only returns + `maxResults` results in a single page along with a `nextToken` + response element. The remaining results of the initial request can + be seen by sending another `ListContainerInstances` request with + the returned `nextToken` value. This value can be between 1 and + 100. If this parameter is not used, then `ListContainerInstances` + returns up to 100 results and a `nextToken` value if applicable. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListContainerInstances', + verb='POST', + path='/', params=params) + + def list_task_definitions(self, family_prefix=None, next_token=None, + max_results=None): + """ + Returns a list of task definitions that are registered to your + account. You can filter the results by family name with the + `familyPrefix` parameter. + + :type family_prefix: string + :param family_prefix: The name of the family that you want to filter + the `ListTaskDefinitions` results with. Specifying a `familyPrefix` + will limit the listed task definitions to definitions that belong + to that family. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListTaskDefinitions` request where `maxResults` was used + and the results exceeded the value of that parameter. Pagination + continues from the end of the previous results that returned the + `nextToken` value. This value is `null` when there are no more + results to return. + + :type max_results: integer + :param max_results: The maximum number of task definition results + returned by `ListTaskDefinitions` in paginated output. 
When this + parameter is used, `ListTaskDefinitions` only returns `maxResults` + results in a single page along with a `nextToken` response element. + The remaining results of the initial request can be seen by sending + another `ListTaskDefinitions` request with the returned `nextToken` + value. This value can be between 1 and 100. If this parameter is + not used, then `ListTaskDefinitions` returns up to 100 results and + a `nextToken` value if applicable. + + """ + params = {} + if family_prefix is not None: + params['familyPrefix'] = family_prefix + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListTaskDefinitions', + verb='POST', + path='/', params=params) + + def list_tasks(self, cluster=None, container_instance=None, family=None, + next_token=None, max_results=None): + """ + Returns a list of tasks for a specified cluster. You can + filter the results by family name or by a particular container + instance with the `family` and `containerInstance` parameters. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the tasks you want to list. If you do not + specify a cluster, the default cluster is assumed.. + + :type container_instance: string + :param container_instance: The container instance UUID or full Amazon + Resource Name (ARN) of the container instance that you want to + filter the `ListTasks` results with. Specifying a + `containerInstance` will limit the results to tasks that belong to + that container instance. + + :type family: string + :param family: The name of the family that you want to filter the + `ListTasks` results with. Specifying a `family` will limit the + results to tasks that belong to that family. + + :type next_token: string + :param next_token: The `nextToken` value returned from a previous + paginated `ListTasks` request where `maxResults` was used and the + results exceeded the value of that parameter. Pagination continues + from the end of the previous results that returned the `nextToken` + value. This value is `null` when there are no more results to + return. + + :type max_results: integer + :param max_results: The maximum number of task results returned by + `ListTasks` in paginated output. When this parameter is used, + `ListTasks` only returns `maxResults` results in a single page + along with a `nextToken` response element. The remaining results of + the initial request can be seen by sending another `ListTasks` + request with the returned `nextToken` value. This value can be + between 1 and 100. If this parameter is not used, then `ListTasks` + returns up to 100 results and a `nextToken` value if applicable. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if container_instance is not None: + params['containerInstance'] = container_instance + if family is not None: + params['family'] = family + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self._make_request( + action='ListTasks', + verb='POST', + path='/', params=params) + + def register_container_instance(self, cluster=None, + instance_identity_document=None, + instance_identity_document_signature=None, + total_resources=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. 
+
+
+        Registers an Amazon EC2 instance into the specified cluster.
+        This instance will become available to place containers on.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that you want to register your container instance with.
+            If you do not specify a cluster, the default cluster is assumed.
+
+        :type instance_identity_document: string
+        :param instance_identity_document:
+
+        :type instance_identity_document_signature: string
+        :param instance_identity_document_signature:
+
+        :type total_resources: list
+        :param total_resources:
+
+        """
+        params = {}
+        if cluster is not None:
+            params['cluster'] = cluster
+        if instance_identity_document is not None:
+            params['instanceIdentityDocument'] = instance_identity_document
+        if instance_identity_document_signature is not None:
+            params['instanceIdentityDocumentSignature'] = instance_identity_document_signature
+        if total_resources is not None:
+            self.build_complex_list_params(
+                params, total_resources,
+                'totalResources.member',
+                ('name', 'type', 'doubleValue', 'longValue', 'integerValue', 'stringSetValue'))
+        return self._make_request(
+            action='RegisterContainerInstance',
+            verb='POST',
+            path='/', params=params)
+
+    def register_task_definition(self, family, container_definitions):
+        """
+        Registers a new task definition from the supplied `family` and
+        `containerDefinitions`.
+
+        :type family: string
+        :param family: You can specify a `family` for a task definition, which
+            allows you to track multiple versions of the same task definition.
+            You can think of the `family` as a name for your task definition.
+
+        :type container_definitions: list
+        :param container_definitions: A list of container definitions in JSON
+            format that describe the different containers that make up your
+            task.
+
+        """
+        params = {'family': family, }
+        self.build_complex_list_params(
+            params, container_definitions,
+            'containerDefinitions.member',
+            ('name', 'image', 'cpu', 'memory', 'links', 'portMappings', 'essential', 'entryPoint', 'command', 'environment'))
+        return self._make_request(
+            action='RegisterTaskDefinition',
+            verb='POST',
+            path='/', params=params)
+
+    def run_task(self, task_definition, cluster=None, overrides=None,
+                 count=None):
+        """
+        Start a task using random placement and the default Amazon ECS
+        scheduler. If you want to use your own scheduler or place a
+        task on a specific container instance, use `StartTask`
+        instead.
+
+        :type cluster: string
+        :param cluster: The short name or full Amazon Resource Name (ARN) of
+            the cluster that you want to run your task on. If you do not
+            specify a cluster, the default cluster is assumed.
+
+        :type task_definition: string
+        :param task_definition: The `family` and `revision` (
+            `family:revision`) or full Amazon Resource Name (ARN) of the task
+            definition that you want to run.
+
+        :type overrides: dict
+        :param overrides:
+
+        :type count: integer
+        :param count: The number of instances of the specified task that you
+            would like to place on your cluster.
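A minimal usage sketch (illustrative only; the task definition name is a
placeholder, boto credentials are assumed to be configured, and
`connect_ec2containerservice` is boto's top-level helper for this
connection):

    import boto

    conn = boto.connect_ec2containerservice()
    # Place one copy of an already-registered task definition on the
    # default cluster and let the ECS scheduler choose the instance.
    conn.run_task('hello-world:1', count=1)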
+ + """ + params = {'taskDefinition': task_definition, } + if cluster is not None: + params['cluster'] = cluster + if overrides is not None: + params['overrides'] = overrides + if count is not None: + params['count'] = count + return self._make_request( + action='RunTask', + verb='POST', + path='/', params=params) + + def start_task(self, task_definition, container_instances, cluster=None, + overrides=None): + """ + Starts a new task from the specified task definition on the + specified container instance or instances. If you want to use + the default Amazon ECS scheduler to place your task, use + `RunTask` instead. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that you want to start your task on. If you do not + specify a cluster, the default cluster is assumed.. + + :type task_definition: string + :param task_definition: The `family` and `revision` ( + `family:revision`) or full Amazon Resource Name (ARN) of the task + definition that you want to start. + + :type overrides: dict + :param overrides: + + :type container_instances: list + :param container_instances: The container instance UUIDs or full Amazon + Resource Name (ARN) entries for the container instances on which + you would like to place your task. + + """ + params = {'taskDefinition': task_definition, } + self.build_list_params(params, + container_instances, + 'containerInstances.member') + if cluster is not None: + params['cluster'] = cluster + if overrides is not None: + params['overrides'] = overrides + return self._make_request( + action='StartTask', + verb='POST', + path='/', params=params) + + def stop_task(self, task, cluster=None): + """ + Stops a running task. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task you want to stop. If you do not + specify a cluster, the default cluster is assumed.. + + :type task: string + :param task: The task UUIDs or full Amazon Resource Name (ARN) entry of + the task you would like to stop. + + """ + params = {'task': task, } + if cluster is not None: + params['cluster'] = cluster + return self._make_request( + action='StopTask', + verb='POST', + path='/', params=params) + + def submit_container_state_change(self, cluster=None, task=None, + container_name=None, status=None, + exit_code=None, reason=None, + network_bindings=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a container changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the container. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task that hosts the container. + + :type container_name: string + :param container_name: The name of the container. + + :type status: string + :param status: The status of the state change request. + + :type exit_code: integer + :param exit_code: The exit code returned for the state change request. + + :type reason: string + :param reason: The reason for the state change request. + + :type network_bindings: list + :param network_bindings: The network bindings of the container. 
+ + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if container_name is not None: + params['containerName'] = container_name + if status is not None: + params['status'] = status + if exit_code is not None: + params['exitCode'] = exit_code + if reason is not None: + params['reason'] = reason + if network_bindings is not None: + self.build_complex_list_params( + params, network_bindings, + 'networkBindings.member', + ('bindIP', 'containerPort', 'hostPort')) + return self._make_request( + action='SubmitContainerStateChange', + verb='POST', + path='/', params=params) + + def submit_task_state_change(self, cluster=None, task=None, status=None, + reason=None): + """ + This action is only used by the Amazon EC2 Container Service + agent, and it is not intended for use outside of the agent. + + + Sent to acknowledge that a task changed states. + + :type cluster: string + :param cluster: The short name or full Amazon Resource Name (ARN) of + the cluster that hosts the task. + + :type task: string + :param task: The task UUID or full Amazon Resource Name (ARN) of the + task in the state change request. + + :type status: string + :param status: The status of the state change request. + + :type reason: string + :param reason: The reason for the state change request. + + """ + params = {} + if cluster is not None: + params['cluster'] = cluster + if task is not None: + params['task'] = task + if status is not None: + params['status'] = status + if reason is not None: + params['reason'] = reason + return self._make_request( + action='SubmitTaskStateChange', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/ecs/__init__.py b/ext/boto/ecs/__init__.py new file mode 100644 index 0000000000..46db50650e --- /dev/null +++ b/ext/boto/ecs/__init__.py @@ -0,0 +1,105 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import boto +from boto.connection import AWSQueryConnection, AWSAuthConnection +from boto.exception import BotoServerError +import time +import urllib +import xml.sax +from boto.ecs.item import ItemSet +from boto import handler + +class ECSConnection(AWSQueryConnection): + """ + ECommerce Connection + + For more information on how to use this module see: + + http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html + """ + + APIVersion = '2010-11-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com', + debug=0, https_connection_factory=None, path='/', + security_token=None, profile_name=None): + super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, path, + security_token=security_token, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['ecs'] + + def get_response(self, action, params, page=0, itemSet=None): + """ + Utility method to handle calls to ECS and parsing of responses. + """ + params['Service'] = "AWSECommerceService" + params['Operation'] = action + if page: + params['ItemPage'] = page + response = self.make_request(None, params, "/onca/xml") + body = response.read().decode('utf-8') + boto.log.debug(body) + + if response.status != 200: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise BotoServerError(response.status, response.reason, body) + + if itemSet is None: + rs = ItemSet(self, action, params, page) + else: + rs = itemSet + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body.encode('utf-8'), h) + if not rs.is_valid: + raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0])) + return rs + + # + # Group methods + # + + def item_search(self, search_index, **params): + """ + Returns items that satisfy the search criteria, including one or more search + indices. + + For a full list of search terms, + :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html + """ + params['SearchIndex'] = search_index + return self.get_response('ItemSearch', params) + + def item_lookup(self, **params): + """ + Returns items that satisfy the lookup query. + + For a full list of parameters, see: + http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf + """ + return self.get_response('ItemLookup', params) \ No newline at end of file diff --git a/ext/boto/ecs/item.py b/ext/boto/ecs/item.py new file mode 100644 index 0000000000..79177a31d4 --- /dev/null +++ b/ext/boto/ecs/item.py @@ -0,0 +1,164 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import xml.sax
+import cgi
+from boto.compat import six, StringIO
+
+class ResponseGroup(xml.sax.ContentHandler):
+    """A Generic "Response Group", which can
+    be anything from the entire list of Items to
+    specific response elements within an item"""
+
+    def __init__(self, connection=None, nodename=None):
+        """Initialize this Item"""
+        self._connection = connection
+        self._nodename = nodename
+        self._nodepath = []
+        self._curobj = None
+        self._xml = StringIO()
+
+    def __repr__(self):
+        return '<%s: %s>' % (self.__class__.__name__, self.__dict__)
+
+    #
+    # Attribute Functions
+    #
+    def get(self, name):
+        return self.__dict__.get(name)
+
+    def set(self, name, value):
+        self.__dict__[name] = value
+
+    def to_xml(self):
+        return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(), self._nodename)
+
+    #
+    # XML Parser functions
+    #
+    def startElement(self, name, attrs, connection):
+        self._xml.write("<%s>" % name)
+        self._nodepath.append(name)
+        if len(self._nodepath) == 1:
+            obj = ResponseGroup(self._connection)
+            self.set(name, obj)
+            self._curobj = obj
+        elif self._curobj:
+            self._curobj.startElement(name, attrs, connection)
+        return None
+
+    def endElement(self, name, value, connection):
+        self._xml.write("%s</%s>" % (cgi.escape(value).replace("&amp;", "&"), name))
+        if len(self._nodepath) == 0:
+            return
+        obj = None
+        curval = self.get(name)
+        if len(self._nodepath) == 1:
+            if value or not curval:
+                self.set(name, value)
+            if self._curobj:
+                self._curobj = None
+        #elif len(self._nodepath) == 2:
+            #self._curobj = None
+        elif self._curobj:
+            self._curobj.endElement(name, value, connection)
+        self._nodepath.pop()
+        return None
+
+
+class Item(ResponseGroup):
+    """A single Item"""
+
+    def __init__(self, connection=None):
+        """Initialize this Item"""
+        ResponseGroup.__init__(self, connection, "Item")
+
+class ItemSet(ResponseGroup):
+    """A special ResponseGroup that has built-in paging, and
+    only creates new Items on the "Item" tag"""
+
+    def __init__(self, connection, action, params, page=0):
+        ResponseGroup.__init__(self, connection, "Items")
+        self.objs = []
+        self.iter = None
+        self.page = page
+        self.action = action
+        self.params = params
+        self.curItem = None
+        self.total_results = 0
+        self.total_pages = 0
+        self.is_valid = False
+        self.errors = []
+
+    def startElement(self, name, attrs, connection):
+        if name == "Item":
+            self.curItem = Item(self._connection)
+        elif self.curItem is not None:
+            self.curItem.startElement(name, attrs, connection)
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'TotalResults':
+            self.total_results = value
+        elif name == 'TotalPages':
+            self.total_pages = value
+        elif name == 'IsValid':
+            if value == 'True':
+                self.is_valid = True
+        elif name == 'Code':
+            self.errors.append({'Code': value, 'Message': None})
+        elif name == 'Message':
+            self.errors[-1]['Message'] = value
+        elif name == 'Item':
+            self.objs.append(self.curItem)
+            self._xml.write(self.curItem.to_xml())
+            self.curItem = None
+        elif self.curItem is not None:
+            self.curItem.endElement(name, value, connection)
+
return None + + def __next__(self): + """Special paging functionality""" + if self.iter is None: + self.iter = iter(self.objs) + try: + return next(self.iter) + except StopIteration: + self.iter = None + self.objs = [] + if int(self.page) < int(self.total_pages): + self.page += 1 + self._connection.get_response(self.action, self.params, self.page, self) + return next(self) + else: + raise + + next = __next__ + + def __iter__(self): + return self + + def to_xml(self): + """Override to first fetch everything""" + for item in self: + pass + return ResponseGroup.to_xml(self) diff --git a/ext/boto/elasticache/__init__.py b/ext/boto/elasticache/__init__.py new file mode 100644 index 0000000000..ff5aaf1f41 --- /dev/null +++ b/ext/boto/elasticache/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS ElastiCache service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.elasticache.layer1 import ElastiCacheConnection + return get_regions('elasticache', connection_cls=ElastiCacheConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.elasticache.layer1 import ElastiCacheConnection + return connect('elasticache', region_name, + connection_cls=ElastiCacheConnection, **kw_params) diff --git a/ext/boto/elasticache/layer1.py b/ext/boto/elasticache/layer1.py new file mode 100644 index 0000000000..62bdefd187 --- /dev/null +++ b/ext/boto/elasticache/layer1.py @@ -0,0 +1,1664 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo + + +class ElastiCacheConnection(AWSQueryConnection): + """ + Amazon ElastiCache + Amazon ElastiCache is a web service that makes it easier to set + up, operate, and scale a distributed cache in the cloud. + + With ElastiCache, customers gain all of the benefits of a high- + performance, in-memory cache with far less of the administrative + burden of launching and managing a distributed cache. The service + makes set-up, scaling, and cluster failure handling much simpler + than in a self-managed cache deployment. + + In addition, through integration with Amazon CloudWatch, customers + get enhanced visibility into the key performance statistics + associated with their cache and can receive alarms if a part of + their cache runs hot. + """ + APIVersion = "2013-06-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "elasticache.us-east-1.amazonaws.com" + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(ElastiCacheConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def authorize_cache_security_group_ingress(self, + cache_security_group_name, + ec2_security_group_name, + ec2_security_group_owner_id): + """ + The AuthorizeCacheSecurityGroupIngress operation allows + network ingress to a cache security group. Applications using + ElastiCache must be running on Amazon EC2, and Amazon EC2 + security groups are used as the authorization mechanism. + You cannot authorize ingress from an Amazon EC2 security group + in one Region to an ElastiCache cluster in another Region. + + :type cache_security_group_name: string + :param cache_security_group_name: The cache security group which will + allow network ingress. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The Amazon EC2 security group to be + authorized for ingress to the cache security group. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the + Amazon EC2 security group owner. Note that this is not the same + thing as an AWS access key ID - you must provide a valid AWS + account number for this parameter. 
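A minimal usage sketch (illustrative only; the group names and the account
number are placeholders, and `connect_to_region` is the helper defined in
`boto/elasticache/__init__.py` above):

    import boto.elasticache

    conn = boto.elasticache.connect_to_region('us-east-1')
    conn.authorize_cache_security_group_ingress(
        cache_security_group_name='mysecuritygroup',
        ec2_security_group_name='my-ec2-group',
        ec2_security_group_owner_id='123456789012')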
+ + """ + params = { + 'CacheSecurityGroupName': cache_security_group_name, + 'EC2SecurityGroupName': ec2_security_group_name, + 'EC2SecurityGroupOwnerId': ec2_security_group_owner_id, + } + return self._make_request( + action='AuthorizeCacheSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None, + cache_node_type=None, engine=None, + replication_group_id=None, engine_version=None, + cache_parameter_group_name=None, + cache_subnet_group_name=None, + cache_security_group_names=None, + security_group_ids=None, snapshot_arns=None, + preferred_availability_zone=None, + preferred_maintenance_window=None, port=None, + notification_topic_arn=None, + auto_minor_version_upgrade=None): + """ + The CreateCacheCluster operation creates a new cache cluster. + All nodes in the cache cluster run the same protocol-compliant + cache engine software - either Memcached or Redis. + + :type cache_cluster_id: string + :param cache_cluster_id: + The cache cluster identifier. This parameter is stored as a lowercase + string. + + Constraints: + + + + Must contain from 1 to 20 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type replication_group_id: string + :param replication_group_id: The replication group to which this cache + cluster should belong. If this parameter is specified, the cache + cluster will be added to the specified replication group as a read + replica; otherwise, the cache cluster will be a standalone primary + that is not part of any replication group. + + :type num_cache_nodes: integer + :param num_cache_nodes: The initial number of cache nodes that the + cache cluster will have. + For a Memcached cluster, valid values are between 1 and 20. If you need + to exceed this limit, please fill out the ElastiCache Limit + Increase Request form at ``_ . + + For Redis, only single-node cache clusters are supported at this time, + so the value for this parameter must be 1. + + :type cache_node_type: string + :param cache_node_type: The compute and memory capacity of the nodes in + the cache cluster. + Valid values for Memcached: + + `cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` | + `cache.m1.large` | `cache.m1.xlarge` | `cache.m3.xlarge` | + `cache.m3.2xlarge` | `cache.m2.xlarge` | `cache.m2.2xlarge` | + `cache.m2.4xlarge` | `cache.c1.xlarge` + + Valid values for Redis: + + `cache.t1.micro` | `cache.m1.small` | `cache.m1.medium` | + `cache.m1.large` | `cache.m1.xlarge` | `cache.m2.xlarge` | + `cache.m2.2xlarge` | `cache.m2.4xlarge` | `cache.c1.xlarge` + + For a complete listing of cache node types and specifications, see `. + + :type engine: string + :param engine: The name of the cache engine to be used for this cache + cluster. + Valid values for this parameter are: + + `memcached` | `redis` + + :type engine_version: string + :param engine_version: The version number of the cache engine to be + used for this cluster. To view the supported cache engine versions, + use the DescribeCacheEngineVersions operation. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to associate with this cache cluster. If this argument is + omitted, the default cache parameter group for the specified engine + will be used. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name of the cache subnet group to + be used for the cache cluster. 
+ Use this parameter only when you are creating a cluster in an Amazon + Virtual Private Cloud (VPC). + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to associate with this cache cluster. + Use this parameter only when you are creating a cluster outside of an + Amazon Virtual Private Cloud (VPC). + + :type security_group_ids: list + :param security_group_ids: One or more VPC security groups associated + with the cache cluster. + Use this parameter only when you are creating a cluster in an Amazon + Virtual Private Cloud (VPC). + + :type snapshot_arns: list + :param snapshot_arns: A single-element string list containing an Amazon + Resource Name (ARN) that uniquely identifies a Redis RDB snapshot + file stored in Amazon S3. The snapshot file will be used to + populate the Redis cache in the new cache cluster. The Amazon S3 + object name in the ARN cannot contain any commas. + Here is an example of an Amazon S3 ARN: + `arn:aws:s3:::my_bucket/snapshot1.rdb` + + **Note:** This parameter is only valid if the `Engine` parameter is + `redis`. + + :type preferred_availability_zone: string + :param preferred_availability_zone: The EC2 Availability Zone in which + the cache cluster will be created. + All cache nodes belonging to a cache cluster are placed in the + preferred availability zone. + + Default: System chosen availability zone. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Example: `sun:05:00-sun:09:00` + + :type port: integer + :param port: The port number on which each of the cache nodes will + accept connections. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the Amazon Simple Notification + Service (SNS) topic to which notifications will be sent. + + The Amazon SNS topic owner must be the same as the cache cluster owner. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Determines whether minor engine + upgrades will be applied automatically to the cache cluster during + the maintenance window. A value of `True` allows these upgrades to + occur; `False` disables automatic upgrades. 
+ Default: `True` + + """ + params = { + 'CacheClusterId': cache_cluster_id, + } + if num_cache_nodes is not None: + params['NumCacheNodes'] = num_cache_nodes + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if engine is not None: + params['Engine'] = engine + if replication_group_id is not None: + params['ReplicationGroupId'] = replication_group_id + if engine_version is not None: + params['EngineVersion'] = engine_version + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if cache_subnet_group_name is not None: + params['CacheSubnetGroupName'] = cache_subnet_group_name + if cache_security_group_names is not None: + self.build_list_params(params, + cache_security_group_names, + 'CacheSecurityGroupNames.member') + if security_group_ids is not None: + self.build_list_params(params, + security_group_ids, + 'SecurityGroupIds.member') + if snapshot_arns is not None: + self.build_list_params(params, + snapshot_arns, + 'SnapshotArns.member') + if preferred_availability_zone is not None: + params['PreferredAvailabilityZone'] = preferred_availability_zone + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if port is not None: + params['Port'] = port + if notification_topic_arn is not None: + params['NotificationTopicArn'] = notification_topic_arn + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + return self._make_request( + action='CreateCacheCluster', + verb='POST', + path='/', params=params) + + def create_cache_parameter_group(self, cache_parameter_group_name, + cache_parameter_group_family, + description): + """ + The CreateCacheParameterGroup operation creates a new cache + parameter group. A cache parameter group is a collection of + parameters that you apply to all of the nodes in a cache + cluster. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: A user-specified name for the cache + parameter group. + + :type cache_parameter_group_family: string + :param cache_parameter_group_family: The name of the cache parameter + group family the cache parameter group can be used with. + Valid values are: `memcached1.4` | `redis2.6` + + :type description: string + :param description: A user-specified description for the cache + parameter group. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + 'CacheParameterGroupFamily': cache_parameter_group_family, + 'Description': description, + } + return self._make_request( + action='CreateCacheParameterGroup', + verb='POST', + path='/', params=params) + + def create_cache_security_group(self, cache_security_group_name, + description): + """ + The CreateCacheSecurityGroup operation creates a new cache + security group. Use a cache security group to control access + to one or more cache clusters. + + Cache security groups are only used when you are creating a + cluster outside of an Amazon Virtual Private Cloud (VPC). If + you are creating a cluster inside of a VPC, use a cache subnet + group instead. For more information, see + CreateCacheSubnetGroup . + + :type cache_security_group_name: string + :param cache_security_group_name: A name for the cache security group. + This value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be the word "Default". 
+
+        Example: `mysecuritygroup`
+
+        :type description: string
+        :param description: A description for the cache security group.
+
+        """
+        params = {
+            'CacheSecurityGroupName': cache_security_group_name,
+            'Description': description,
+        }
+        return self._make_request(
+            action='CreateCacheSecurityGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def create_cache_subnet_group(self, cache_subnet_group_name,
+                                  cache_subnet_group_description, subnet_ids):
+        """
+        The CreateCacheSubnetGroup operation creates a new cache
+        subnet group.
+
+        Use this parameter only when you are creating a cluster in an
+        Amazon Virtual Private Cloud (VPC).
+
+        :type cache_subnet_group_name: string
+        :param cache_subnet_group_name: A name for the cache subnet group. This
+            value is stored as a lowercase string.
+            Constraints: Must contain no more than 255 alphanumeric characters
+            or hyphens.
+
+            Example: `mysubnetgroup`
+
+        :type cache_subnet_group_description: string
+        :param cache_subnet_group_description: A description for the cache
+            subnet group.
+
+        :type subnet_ids: list
+        :param subnet_ids: A list of VPC subnet IDs for the cache subnet group.
+
+        """
+        params = {
+            'CacheSubnetGroupName': cache_subnet_group_name,
+            'CacheSubnetGroupDescription': cache_subnet_group_description,
+        }
+        self.build_list_params(params,
+                               subnet_ids,
+                               'SubnetIds.member')
+        return self._make_request(
+            action='CreateCacheSubnetGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def create_replication_group(self, replication_group_id,
+                                 primary_cluster_id,
+                                 replication_group_description):
+        """
+        The CreateReplicationGroup operation creates a replication
+        group. A replication group is a collection of cache clusters,
+        where one of the clusters is a read/write primary and the
+        other clusters are read-only replicas. Writes to the primary
+        are automatically propagated to the replicas.
+
+        When you create a replication group, you must specify an
+        existing cache cluster that is in the primary role. When the
+        replication group has been successfully created, you can add
+        one or more read replicas to it, up to a total of five
+        read replicas.
+
+        :type replication_group_id: string
+        :param replication_group_id:
+            The replication group identifier. This parameter is stored as a
+            lowercase string.
+
+            Constraints:
+
+
+            + Must contain from 1 to 20 alphanumeric characters or hyphens.
+            + First character must be a letter.
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        :type primary_cluster_id: string
+        :param primary_cluster_id: The identifier of the cache cluster that
+            will serve as the primary for this replication group. This cache
+            cluster must already exist and have a status of available .
+
+        :type replication_group_description: string
+        :param replication_group_description: A user-specified description for
+            the replication group.
+
+        """
+        params = {
+            'ReplicationGroupId': replication_group_id,
+            'PrimaryClusterId': primary_cluster_id,
+            'ReplicationGroupDescription': replication_group_description,
+        }
+        return self._make_request(
+            action='CreateReplicationGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cache_cluster(self, cache_cluster_id):
+        """
+        The DeleteCacheCluster operation deletes a previously
+        provisioned cache cluster. DeleteCacheCluster deletes all
+        associated cache nodes, node endpoints and the cache cluster
+        itself.
When you receive a successful response from this
+        operation, Amazon ElastiCache immediately begins deleting the
+        cache cluster; you cannot cancel or revert this operation.
+
+        :type cache_cluster_id: string
+        :param cache_cluster_id: The cache cluster identifier for the cluster
+            to be deleted. This parameter is not case sensitive.
+
+        """
+        params = {'CacheClusterId': cache_cluster_id, }
+        return self._make_request(
+            action='DeleteCacheCluster',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cache_parameter_group(self, cache_parameter_group_name):
+        """
+        The DeleteCacheParameterGroup operation deletes the specified
+        cache parameter group. You cannot delete a cache parameter
+        group if it is associated with any cache clusters.
+
+        :type cache_parameter_group_name: string
+        :param cache_parameter_group_name:
+            The name of the cache parameter group to delete.
+
+            The specified cache parameter group must not be associated with any
+            cache clusters.
+
+        """
+        params = {
+            'CacheParameterGroupName': cache_parameter_group_name,
+        }
+        return self._make_request(
+            action='DeleteCacheParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cache_security_group(self, cache_security_group_name):
+        """
+        The DeleteCacheSecurityGroup operation deletes a cache
+        security group.
+        You cannot delete a cache security group if it is associated
+        with any cache clusters.
+
+        :type cache_security_group_name: string
+        :param cache_security_group_name:
+            The name of the cache security group to delete.
+
+            You cannot delete the default security group.
+
+        """
+        params = {
+            'CacheSecurityGroupName': cache_security_group_name,
+        }
+        return self._make_request(
+            action='DeleteCacheSecurityGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cache_subnet_group(self, cache_subnet_group_name):
+        """
+        The DeleteCacheSubnetGroup operation deletes a cache subnet
+        group.
+        You cannot delete a cache subnet group if it is associated
+        with any cache clusters.
+
+        :type cache_subnet_group_name: string
+        :param cache_subnet_group_name: The name of the cache subnet group to
+            delete.
+            Constraints: Must contain no more than 255 alphanumeric characters
+            or hyphens.
+
+        """
+        params = {'CacheSubnetGroupName': cache_subnet_group_name, }
+        return self._make_request(
+            action='DeleteCacheSubnetGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_replication_group(self, replication_group_id):
+        """
+        The DeleteReplicationGroup operation deletes an existing
+        replication group. DeleteReplicationGroup deletes the primary
+        cache cluster and all of the read replicas in the replication
+        group. When you receive a successful response from this
+        operation, Amazon ElastiCache immediately begins deleting the
+        entire replication group; you cannot cancel or revert this
+        operation.
+
+        :type replication_group_id: string
+        :param replication_group_id: The identifier for the replication group
+            to be deleted. This parameter is not case sensitive.
+
+        """
+        params = {'ReplicationGroupId': replication_group_id, }
+        return self._make_request(
+            action='DeleteReplicationGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cache_clusters(self, cache_cluster_id=None,
+                                max_records=None, marker=None,
+                                show_cache_node_info=None):
+        """
+        The DescribeCacheClusters operation returns information about
+        all provisioned cache clusters if no cache cluster identifier
+        is specified, or about a specific cache cluster if a cache
+        cluster identifier is supplied.
+
+        By default, abbreviated information about the cache
+        cluster(s) will be returned. You can use the optional
+        ShowCacheNodeInfo flag to retrieve detailed information about the
+        cache nodes associated with the cache clusters. These details
+        include the DNS address and port for the cache node endpoint.
+
+        If the cluster is in the CREATING state, only cluster level
+        information will be displayed until all of the nodes are
+        successfully provisioned.
+
+        If the cluster is in the DELETING state, only cluster level
+        information will be displayed.
+
+        If cache nodes are currently being added to the cache cluster,
+        node endpoint information and creation time for the additional
+        nodes will not be displayed until they are completely
+        provisioned. When the cache cluster state is available , the
+        cluster is ready for use.
+
+        If cache nodes are currently being removed from the cache
+        cluster, no endpoint information for the removed nodes is
+        displayed.
+
+        :type cache_cluster_id: string
+        :param cache_cluster_id: The user-supplied cluster identifier. If this
+            parameter is specified, only information about that specific cache
+            cluster is returned. This parameter isn't case sensitive.
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response. If more records exist than the specified `MaxRecords`
+            value, a marker is included in the response so that the remaining
+            results can be retrieved.
+            Default: 100
+
+            Constraints: minimum 20; maximum 100.
+
+        :type marker: string
+        :param marker: An optional marker returned from a prior request. Use
+            this marker for pagination of results from this operation. If this
+            parameter is specified, the response includes only records beyond
+            the marker, up to the value specified by MaxRecords .
+
+        :type show_cache_node_info: boolean
+        :param show_cache_node_info: An optional flag that can be included in
+            the DescribeCacheClusters request to retrieve information about the
+            individual cache nodes.
+
+        """
+        params = {}
+        if cache_cluster_id is not None:
+            params['CacheClusterId'] = cache_cluster_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        if show_cache_node_info is not None:
+            params['ShowCacheNodeInfo'] = str(
+                show_cache_node_info).lower()
+        return self._make_request(
+            action='DescribeCacheClusters',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cache_engine_versions(self, engine=None,
+                                       engine_version=None,
+                                       cache_parameter_group_family=None,
+                                       max_records=None, marker=None,
+                                       default_only=None):
+        """
+        The DescribeCacheEngineVersions operation returns a list of
+        the available cache engines and their versions.
+
+        :type engine: string
+        :param engine: The cache engine to return. Valid values: `memcached` |
+            `redis`
+
+        :type engine_version: string
+        :param engine_version: The cache engine version to return.
+            Example: `1.4.14`
+
+        :type cache_parameter_group_family: string
+        :param cache_parameter_group_family:
+            The name of a specific cache parameter group family to return
+            details for.
+
+            Constraints:
+
+
+            + Must be 1 to 255 alphanumeric characters
+            + First character must be a letter
+            + Cannot end with a hyphen or contain two consecutive hyphens
+
+        :type max_records: integer
+        :param max_records: The maximum number of records to include in the
+            response.
If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + :type default_only: boolean + :param default_only: If true , specifies that only the default version + of the specified engine or engine and major version combination is + to be returned. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if cache_parameter_group_family is not None: + params['CacheParameterGroupFamily'] = cache_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + return self._make_request( + action='DescribeCacheEngineVersions', + verb='POST', + path='/', params=params) + + def describe_cache_parameter_groups(self, + cache_parameter_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheParameterGroups operation returns a list of + cache parameter group descriptions. If a cache parameter group + name is specified, the list will contain only the descriptions + for that group. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of a specific cache + parameter group to return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheParameterGroups', + verb='POST', + path='/', params=params) + + def describe_cache_parameters(self, cache_parameter_group_name, + source=None, max_records=None, marker=None): + """ + The DescribeCacheParameters operation returns the detailed + parameter list for a particular cache parameter group. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of a specific cache + parameter group to return details for. + + :type source: string + :param source: The parameter types to return. + Valid values: `user` | `system` | `engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. 
+ Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheParameters', + verb='POST', + path='/', params=params) + + def describe_cache_security_groups(self, cache_security_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheSecurityGroups operation returns a list of + cache security group descriptions. If a cache security group + name is specified, the list will contain only the description + of that group. + + :type cache_security_group_name: string + :param cache_security_group_name: The name of the cache security group + to return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if cache_security_group_name is not None: + params['CacheSecurityGroupName'] = cache_security_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheSecurityGroups', + verb='POST', + path='/', params=params) + + def describe_cache_subnet_groups(self, cache_subnet_group_name=None, + max_records=None, marker=None): + """ + The DescribeCacheSubnetGroups operation returns a list of + cache subnet group descriptions. If a subnet group name is + specified, the list will contain only the description of that + group. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name of the cache subnet group to + return details for. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . 
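Every Describe* method in this module shares the MaxRecords/Marker
pagination contract documented above; a sketch of draining one such call
(illustrative only: the response envelope keys assume boto's usual
<Action>Response/<Action>Result JSON nesting):

    import boto.elasticache

    conn = boto.elasticache.connect_to_region('us-east-1')
    marker, groups = None, []
    while True:
        resp = conn.describe_cache_subnet_groups(max_records=100,
                                                 marker=marker)
        result = (resp['DescribeCacheSubnetGroupsResponse']
                      ['DescribeCacheSubnetGroupsResult'])
        groups.extend(result['CacheSubnetGroups'])
        # Marker is absent on the final page of results.
        marker = result.get('Marker')
        if not marker:
            break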
+ + """ + params = {} + if cache_subnet_group_name is not None: + params['CacheSubnetGroupName'] = cache_subnet_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeCacheSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, + cache_parameter_group_family, + max_records=None, marker=None): + """ + The DescribeEngineDefaultParameters operation returns the + default engine and system parameter information for the + specified cache engine. + + :type cache_parameter_group_family: string + :param cache_parameter_group_family: The name of the cache parameter + group family. Valid values are: `memcached1.4` | `redis2.6` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = { + 'CacheParameterGroupFamily': cache_parameter_group_family, + } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEngineDefaultParameters', + verb='POST', + path='/', params=params) + + def describe_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, duration=None, + max_records=None, marker=None): + """ + The DescribeEvents operation returns events related to cache + clusters, cache security groups, and cache parameter groups. + You can obtain events specific to a particular cache cluster, + cache security group, or cache parameter group by providing + the name as a parameter. + + By default, only the events occurring within the last hour are + returned; however, you can retrieve up to 14 days' worth of + events if necessary. + + :type source_identifier: string + :param source_identifier: The identifier of the event source for which + events will be returned. If not specified, then all sources are + included in the response. + + :type source_type: string + :param source_type: The event source to retrieve events for. If no + value is specified, all events are returned. + Valid values are: `cache-cluster` | `cache-parameter-group` | `cache- + security-group` | `cache-subnet-group` + + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. + + :type duration: integer + :param duration: The number of minutes' worth of events to retrieve. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. 
+ + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_replication_groups(self, replication_group_id=None, + max_records=None, marker=None): + """ + The DescribeReplicationGroups operation returns information + about a particular replication group. If no identifier is + specified, DescribeReplicationGroups returns information about + all replication groups. + + :type replication_group_id: string + :param replication_group_id: The identifier for the replication group + to be described. This parameter is not case sensitive. + If you do not specify this parameter, information about all replication + groups is returned. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if replication_group_id is not None: + params['ReplicationGroupId'] = replication_group_id + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReplicationGroups', + verb='POST', + path='/', params=params) + + def describe_reserved_cache_nodes(self, reserved_cache_node_id=None, + reserved_cache_nodes_offering_id=None, + cache_node_type=None, duration=None, + product_description=None, + offering_type=None, max_records=None, + marker=None): + """ + The DescribeReservedCacheNodes operation returns information + about reserved cache nodes for this account, or about a + specified reserved cache node. + + :type reserved_cache_node_id: string + :param reserved_cache_node_id: The reserved cache node identifier + filter value. Use this parameter to show only the reservation that + matches the specified reservation ID. + + :type reserved_cache_nodes_offering_id: string + :param reserved_cache_nodes_offering_id: The offering identifier filter + value. Use this parameter to show only purchased reservations + matching the specified offering identifier. + + :type cache_node_type: string + :param cache_node_type: The cache node type filter value. Use this + parameter to show only those reservations matching the specified + cache node type. + + :type duration: string + :param duration: The duration filter value, specified in years or + seconds. 
Use this parameter to show only reservations for this + duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. Use + this parameter to show only those reservations matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Use this + parameter to show only the available offerings matching the + specified offering type. + Valid values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if reserved_cache_node_id is not None: + params['ReservedCacheNodeId'] = reserved_cache_node_id + if reserved_cache_nodes_offering_id is not None: + params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedCacheNodes', + verb='POST', + path='/', params=params) + + def describe_reserved_cache_nodes_offerings(self, + reserved_cache_nodes_offering_id=None, + cache_node_type=None, + duration=None, + product_description=None, + offering_type=None, + max_records=None, + marker=None): + """ + The DescribeReservedCacheNodesOfferings operation lists + available reserved cache node offerings. + + :type reserved_cache_nodes_offering_id: string + :param reserved_cache_nodes_offering_id: The offering identifier filter + value. Use this parameter to show only the available offering that + matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type cache_node_type: string + :param cache_node_type: The cache node type filter value. Use this + parameter to show only the available offerings matching the + specified cache node type. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Use this parameter to show only reservations for a given duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: The product description filter value. Use + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Use this + parameter to show only the available offerings matching the + specified offering type. 
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results can be retrieved. + Default: 100 + + Constraints: minimum 20; maximum 100. + + :type marker: string + :param marker: An optional marker returned from a prior request. Use + this marker for pagination of results from this operation. If this + parameter is specified, the response includes only records beyond + the marker, up to the value specified by MaxRecords . + + """ + params = {} + if reserved_cache_nodes_offering_id is not None: + params['ReservedCacheNodesOfferingId'] = reserved_cache_nodes_offering_id + if cache_node_type is not None: + params['CacheNodeType'] = cache_node_type + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedCacheNodesOfferings', + verb='POST', + path='/', params=params) + + def modify_cache_cluster(self, cache_cluster_id, num_cache_nodes=None, + cache_node_ids_to_remove=None, + cache_security_group_names=None, + security_group_ids=None, + preferred_maintenance_window=None, + notification_topic_arn=None, + cache_parameter_group_name=None, + notification_topic_status=None, + apply_immediately=None, engine_version=None, + auto_minor_version_upgrade=None): + """ + The ModifyCacheCluster operation modifies the settings for a + cache cluster. You can use this operation to change one or + more cluster configuration parameters by specifying the + parameters and the new values. + + :type cache_cluster_id: string + :param cache_cluster_id: The cache cluster identifier. This value is + stored as a lowercase string. + + :type num_cache_nodes: integer + :param num_cache_nodes: The number of cache nodes that the cache + cluster should have. If the value for NumCacheNodes is greater than + the existing number of cache nodes, then more nodes will be added. + If the value is less than the existing number of cache nodes, then + cache nodes will be removed. + If you are removing cache nodes, you must use the CacheNodeIdsToRemove + parameter to provide the IDs of the specific cache nodes to be + removed. + + :type cache_node_ids_to_remove: list + :param cache_node_ids_to_remove: A list of cache node IDs to be + removed. A node ID is a numeric identifier (0001, 0002, etc.). This + parameter is only valid when NumCacheNodes is less than the + existing number of cache nodes. The number of cache node IDs + supplied in this parameter must match the difference between the + existing number of cache nodes in the cluster and the value of + NumCacheNodes in the request. + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to authorize on this cache cluster. This change is asynchronously + applied as soon as possible. + This parameter can be used only with clusters that are created outside + of an Amazon Virtual Private Cloud (VPC). + + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be "Default". 
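The offerings call takes the same filters; narrowing to three-year heavy-utilization reservations looks roughly like the sketch below. The filter literals come straight from the Valid Values lists in the docstrings, and `conn` is the connection object from the earlier sketch:

```python
# Duration accepts years ('1' | '3') or the equivalent in seconds;
# offering_type must match one of the documented literals verbatim.
offerings = conn.describe_reserved_cache_nodes_offerings(
    duration='3',
    offering_type='Heavy Utilization',
    max_records=20)
```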
+ + :type security_group_ids: list + :param security_group_ids: Specifies the VPC Security Groups associated + with the cache cluster. + This parameter can be used only with clusters that are created in an + Amazon Virtual Private Cloud (VPC). + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. Note that system + maintenance may result in an outage. This change is made + immediately. If you are moving this window to the current time, + there must be at least 120 minutes between the current time and end + of the window to ensure that pending changes are applied. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the SNS topic to which notifications + will be sent. + + The SNS topic owner must be same as the cache cluster owner. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to apply to this cache cluster. This change is asynchronously + applied as soon as possible for parameters when the + ApplyImmediately parameter is specified as true for this request. + + :type notification_topic_status: string + :param notification_topic_status: The status of the Amazon SNS + notification topic. Notifications are sent only if the status is + active . + Valid values: `active` | `inactive` + + :type apply_immediately: boolean + :param apply_immediately: If `True`, this parameter causes the + modifications in this request and any pending modifications to be + applied, asynchronously and as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the cache cluster. + If `False`, then changes to the cache cluster are applied on the next + maintenance reboot, or the next failure reboot, whichever occurs + first. + + Valid values: `True` | `False` + + Default: `False` + + :type engine_version: string + :param engine_version: The upgraded version of the cache engine to be + run on the cache cluster nodes. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: If `True`, then minor engine + upgrades will be applied automatically to the cache cluster during + the maintenance window. 
+ Valid values: `True` | `False` + + Default: `True` + + """ + params = {'CacheClusterId': cache_cluster_id, } + if num_cache_nodes is not None: + params['NumCacheNodes'] = num_cache_nodes + if cache_node_ids_to_remove is not None: + self.build_list_params(params, + cache_node_ids_to_remove, + 'CacheNodeIdsToRemove.member') + if cache_security_group_names is not None: + self.build_list_params(params, + cache_security_group_names, + 'CacheSecurityGroupNames.member') + if security_group_ids is not None: + self.build_list_params(params, + security_group_ids, + 'SecurityGroupIds.member') + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if notification_topic_arn is not None: + params['NotificationTopicArn'] = notification_topic_arn + if cache_parameter_group_name is not None: + params['CacheParameterGroupName'] = cache_parameter_group_name + if notification_topic_status is not None: + params['NotificationTopicStatus'] = notification_topic_status + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + return self._make_request( + action='ModifyCacheCluster', + verb='POST', + path='/', params=params) + + def modify_cache_parameter_group(self, cache_parameter_group_name, + parameter_name_values): + """ + The ModifyCacheParameterGroup operation modifies the + parameters of a cache parameter group. You can modify up to 20 + parameters in a single request by submitting a list parameter + name and value pairs. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to modify. + + :type parameter_name_values: list + :param parameter_name_values: An array of parameter names and values + for the parameter update. You must supply at least one parameter + name and value; subsequent arguments are optional. A maximum of 20 + parameters may be modified per request. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + self.build_complex_list_params( + params, parameter_name_values, + 'ParameterNameValues.member', + ('ParameterName', 'ParameterValue')) + return self._make_request( + action='ModifyCacheParameterGroup', + verb='POST', + path='/', params=params) + + def modify_cache_subnet_group(self, cache_subnet_group_name, + cache_subnet_group_description=None, + subnet_ids=None): + """ + The ModifyCacheSubnetGroup operation modifies an existing + cache subnet group. + + :type cache_subnet_group_name: string + :param cache_subnet_group_name: The name for the cache subnet group. + This value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. + + Example: `mysubnetgroup` + + :type cache_subnet_group_description: string + :param cache_subnet_group_description: A description for the cache + subnet group. + + :type subnet_ids: list + :param subnet_ids: The EC2 subnet IDs for the cache subnet group. 
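`modify_cache_parameter_group` is the call here whose argument shape is easiest to get wrong: `build_complex_list_params` expects a list of `(name, value)` tuples, which it flattens into `ParameterNameValues.member.N.ParameterName`/`.ParameterValue` query parameters. A hedged sketch, with placeholder group and parameter names:

```python
# Each (name, value) pair becomes
# ParameterNameValues.member.N.ParameterName / .ParameterValue on the wire.
# The group name and parameter names below are illustrative only.
conn.modify_cache_parameter_group(
    cache_parameter_group_name='my-memcached-params',
    parameter_name_values=[
        ('max_item_size', '10485760'),
        ('chunk_size_growth_factor', '1.25'),
    ])
```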
+ + """ + params = {'CacheSubnetGroupName': cache_subnet_group_name, } + if cache_subnet_group_description is not None: + params['CacheSubnetGroupDescription'] = cache_subnet_group_description + if subnet_ids is not None: + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + return self._make_request( + action='ModifyCacheSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_replication_group(self, replication_group_id, + replication_group_description=None, + cache_security_group_names=None, + security_group_ids=None, + preferred_maintenance_window=None, + notification_topic_arn=None, + cache_parameter_group_name=None, + notification_topic_status=None, + apply_immediately=None, engine_version=None, + auto_minor_version_upgrade=None, + primary_cluster_id=None): + """ + The ModifyReplicationGroup operation modifies the settings for + a replication group. + + :type replication_group_id: string + :param replication_group_id: The identifier of the replication group to + modify. + + :type replication_group_description: string + :param replication_group_description: A description for the replication + group. Maximum length is 255 characters. + + :type cache_security_group_names: list + :param cache_security_group_names: A list of cache security group names + to authorize for the clusters in this replication group. This + change is asynchronously applied as soon as possible. + This parameter can be used only with replication groups containing + cache clusters running outside of an Amazon Virtual Private Cloud + (VPC). + + Constraints: Must contain no more than 255 alphanumeric characters. + Must not be "Default". + + :type security_group_ids: list + :param security_group_ids: Specifies the VPC Security Groups associated + with the cache clusters in the replication group. + This parameter can be used only with replication groups containing + cache clusters running in an Amazon Virtual Private Cloud (VPC). + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which replication group system maintenance can occur. Note + that system maintenance may result in an outage. This change is + made immediately. If you are moving this window to the current + time, there must be at least 120 minutes between the current time + and end of the window to ensure that pending changes are applied. + + :type notification_topic_arn: string + :param notification_topic_arn: + The Amazon Resource Name (ARN) of the SNS topic to which notifications + will be sent. + + The SNS topic owner must be same as the replication group owner. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to apply to all of the cache nodes in this replication group. + This change is asynchronously applied as soon as possible for + parameters when the ApplyImmediately parameter is specified as true + for this request. + + :type notification_topic_status: string + :param notification_topic_status: The status of the Amazon SNS + notification topic for the replication group. Notifications are + sent only if the status is active . + Valid values: `active` | `inactive` + + :type apply_immediately: boolean + :param apply_immediately: If `True`, this parameter causes the + modifications in this request and any pending modifications to be + applied, asynchronously and as soon as possible, regardless of the + PreferredMaintenanceWindow setting for the replication group. 
+        If `False`, then changes to the nodes in the replication group are
+            applied on the next maintenance reboot, or the next failure reboot,
+            whichever occurs first.
+
+        Valid values: `True` | `False`
+
+        Default: `False`
+
+        :type engine_version: string
+        :param engine_version: The upgraded version of the cache engine to be
+            run on the nodes in the replication group.
+
+        :type auto_minor_version_upgrade: boolean
+        :param auto_minor_version_upgrade: Determines whether minor engine
+            upgrades will be applied automatically to all of the cache nodes in
+            the replication group during the maintenance window. A value of
+            `True` allows these upgrades to occur; `False` disables automatic
+            upgrades.
+
+        :type primary_cluster_id: string
+        :param primary_cluster_id: If this parameter is specified, ElastiCache
+            will promote each of the nodes in the specified cache cluster to
+            the primary role. The nodes of all other clusters in the
+            replication group will be read replicas.
+
+        """
+        params = {'ReplicationGroupId': replication_group_id, }
+        if replication_group_description is not None:
+            params['ReplicationGroupDescription'] = replication_group_description
+        if cache_security_group_names is not None:
+            self.build_list_params(params,
+                                   cache_security_group_names,
+                                   'CacheSecurityGroupNames.member')
+        if security_group_ids is not None:
+            self.build_list_params(params,
+                                   security_group_ids,
+                                   'SecurityGroupIds.member')
+        if preferred_maintenance_window is not None:
+            params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+        if notification_topic_arn is not None:
+            params['NotificationTopicArn'] = notification_topic_arn
+        if cache_parameter_group_name is not None:
+            params['CacheParameterGroupName'] = cache_parameter_group_name
+        if notification_topic_status is not None:
+            params['NotificationTopicStatus'] = notification_topic_status
+        if apply_immediately is not None:
+            params['ApplyImmediately'] = str(
+                apply_immediately).lower()
+        if engine_version is not None:
+            params['EngineVersion'] = engine_version
+        if auto_minor_version_upgrade is not None:
+            params['AutoMinorVersionUpgrade'] = str(
+                auto_minor_version_upgrade).lower()
+        if primary_cluster_id is not None:
+            params['PrimaryClusterId'] = primary_cluster_id
+        return self._make_request(
+            action='ModifyReplicationGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def purchase_reserved_cache_nodes_offering(self,
+                                               reserved_cache_nodes_offering_id,
+                                               reserved_cache_node_id=None,
+                                               cache_node_count=None):
+        """
+        The PurchaseReservedCacheNodesOffering operation allows you to
+        purchase a reserved cache node offering.
+
+        :type reserved_cache_nodes_offering_id: string
+        :param reserved_cache_nodes_offering_id: The ID of the reserved cache
+            node offering to purchase.
+        Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
+
+        :type reserved_cache_node_id: string
+        :param reserved_cache_node_id: A customer-specified identifier to track
+            this reservation.
+        Example: myreservationID
+
+        :type cache_node_count: integer
+        :param cache_node_count: The number of cache node instances to reserve.
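Note how `modify_replication_group` serializes booleans with `str(...).lower()`, so Python's `True`/`False` become the `'true'`/`'false'` strings the Query API expects. Promoting a different cluster to primary with immediate effect might look like this sketch (identifiers are placeholders):

```python
# ApplyImmediately is sent as the string 'true' thanks to the
# str(apply_immediately).lower() conversion in the method body.
conn.modify_replication_group(
    replication_group_id='my-repl-group',
    primary_cluster_id='my-cluster-002',
    apply_immediately=True)
```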
+ Default: `1` + + """ + params = { + 'ReservedCacheNodesOfferingId': reserved_cache_nodes_offering_id, + } + if reserved_cache_node_id is not None: + params['ReservedCacheNodeId'] = reserved_cache_node_id + if cache_node_count is not None: + params['CacheNodeCount'] = cache_node_count + return self._make_request( + action='PurchaseReservedCacheNodesOffering', + verb='POST', + path='/', params=params) + + def reboot_cache_cluster(self, cache_cluster_id, + cache_node_ids_to_reboot): + """ + The RebootCacheCluster operation reboots some, or all, of the + cache cluster nodes within a provisioned cache cluster. This + API will apply any modified cache parameter groups to the + cache cluster. The reboot action takes place as soon as + possible, and results in a momentary outage to the cache + cluster. During the reboot, the cache cluster status is set to + REBOOTING. + + The reboot causes the contents of the cache (for each cache + cluster node being rebooted) to be lost. + + When the reboot is complete, a cache cluster event is created. + + :type cache_cluster_id: string + :param cache_cluster_id: The cache cluster identifier. This parameter + is stored as a lowercase string. + + :type cache_node_ids_to_reboot: list + :param cache_node_ids_to_reboot: A list of cache cluster node IDs to + reboot. A node ID is a numeric identifier (0001, 0002, etc.). To + reboot an entire cache cluster, specify all of the cache cluster + node IDs. + + """ + params = {'CacheClusterId': cache_cluster_id, } + self.build_list_params(params, + cache_node_ids_to_reboot, + 'CacheNodeIdsToReboot.member') + return self._make_request( + action='RebootCacheCluster', + verb='POST', + path='/', params=params) + + def reset_cache_parameter_group(self, cache_parameter_group_name, + parameter_name_values, + reset_all_parameters=None): + """ + The ResetCacheParameterGroup operation modifies the parameters + of a cache parameter group to the engine or system default + value. You can reset specific parameters by submitting a list + of parameter names. To reset the entire cache parameter group, + specify the ResetAllParameters and CacheParameterGroupName + parameters. + + :type cache_parameter_group_name: string + :param cache_parameter_group_name: The name of the cache parameter + group to reset. + + :type reset_all_parameters: boolean + :param reset_all_parameters: If true , all parameters in the cache + parameter group will be reset to default values. If false , no such + action occurs. + Valid values: `True` | `False` + + :type parameter_name_values: list + :param parameter_name_values: An array of parameter names to be reset. + If you are not resetting the entire cache parameter group, you must + specify at least one parameter name. + + """ + params = { + 'CacheParameterGroupName': cache_parameter_group_name, + } + self.build_complex_list_params( + params, parameter_name_values, + 'ParameterNameValues.member', + ('ParameterName', 'ParameterValue')) + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + return self._make_request( + action='ResetCacheParameterGroup', + verb='POST', + path='/', params=params) + + def revoke_cache_security_group_ingress(self, cache_security_group_name, + ec2_security_group_name, + ec2_security_group_owner_id): + """ + The RevokeCacheSecurityGroupIngress operation revokes ingress + from a cache security group. Use this operation to disallow + access from an Amazon EC2 security group that had been + previously authorized. 
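`RebootCacheCluster` wants an explicit list of targets even for a full-cluster reboot; node IDs are the zero-padded numeric strings the docstring describes. A sketch with placeholder names:

```python
# Reboot two specific nodes. To reboot the whole cluster, pass every
# node ID; there is no "all nodes" shorthand in this API.
conn.reboot_cache_cluster(
    cache_cluster_id='my-cluster',
    cache_node_ids_to_reboot=['0001', '0002'])
```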
+ + :type cache_security_group_name: string + :param cache_security_group_name: The name of the cache security group + to revoke ingress from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the Amazon EC2 security + group to revoke access from. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS account number of the + Amazon EC2 security group owner. Note that this is not the same + thing as an AWS access key ID - you must provide a valid AWS + account number for this parameter. + + """ + params = { + 'CacheSecurityGroupName': cache_security_group_name, + 'EC2SecurityGroupName': ec2_security_group_name, + 'EC2SecurityGroupOwnerId': ec2_security_group_owner_id, + } + return self._make_request( + action='RevokeCacheSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + raise self.ResponseError(response.status, response.reason, body) diff --git a/ext/boto/elastictranscoder/__init__.py b/ext/boto/elastictranscoder/__init__.py new file mode 100644 index 0000000000..cf41b8a7e5 --- /dev/null +++ b/ext/boto/elastictranscoder/__init__.py @@ -0,0 +1,45 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Elastic Transcoder service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.elastictranscoder.layer1 import ElasticTranscoderConnection + return get_regions( + 'elastictranscoder', + connection_cls=ElasticTranscoderConnection + ) + + +def connect_to_region(region_name, **kw_params): + from boto.elastictranscoder.layer1 import ElasticTranscoderConnection + return connect('elastictranscoder', region_name, + connection_cls=ElasticTranscoderConnection, **kw_params) + diff --git a/ext/boto/elastictranscoder/exceptions.py b/ext/boto/elastictranscoder/exceptions.py new file mode 100644 index 0000000000..94b399f537 --- /dev/null +++ b/ext/boto/elastictranscoder/exceptions.py @@ -0,0 +1,50 @@ +# Copyright (c) 2013 Amazon.com, Inc. 
or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class LimitExceededException(JSONResponseError): + pass + + +class ResourceInUseException(JSONResponseError): + pass + + +class AccessDeniedException(JSONResponseError): + pass + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class InternalServiceException(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass + + +class IncompatibleVersionException(JSONResponseError): + pass diff --git a/ext/boto/elastictranscoder/layer1.py b/ext/boto/elastictranscoder/layer1.py new file mode 100644 index 0000000000..0f4dc9c74c --- /dev/null +++ b/ext/boto/elastictranscoder/layer1.py @@ -0,0 +1,932 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json +from boto.exception import JSONResponseError +from boto.connection import AWSAuthConnection +from boto.regioninfo import RegionInfo +from boto.elastictranscoder import exceptions + + +class ElasticTranscoderConnection(AWSAuthConnection): + """ + AWS Elastic Transcoder Service + The AWS Elastic Transcoder Service. 
+ """ + APIVersion = "2012-09-25" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "IncompatibleVersionException": exceptions.IncompatibleVersionException, + "LimitExceededException": exceptions.LimitExceededException, + "ResourceInUseException": exceptions.ResourceInUseException, + "AccessDeniedException": exceptions.AccessDeniedException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InternalServiceException": exceptions.InternalServiceException, + "ValidationException": exceptions.ValidationException, + } + + + def __init__(self, **kwargs): + region = kwargs.get('region') + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + else: + del kwargs['region'] + kwargs['host'] = region.endpoint + super(ElasticTranscoderConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def cancel_job(self, id=None): + """ + The CancelJob operation cancels an unfinished job. + You can only cancel a job that has a status of `Submitted`. To + prevent a pipeline from starting to process a job while you're + getting the job identifier, use UpdatePipelineStatus to + temporarily pause the pipeline. + + :type id: string + :param id: The identifier of the job that you want to cancel. + To get a list of the jobs (including their `jobId`) that have a status + of `Submitted`, use the ListJobsByStatus API action. + + """ + uri = '/2012-09-25/jobs/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def create_job(self, pipeline_id=None, input_name=None, output=None, + outputs=None, output_key_prefix=None, playlists=None): + """ + When you create a job, Elastic Transcoder returns JSON data + that includes the values that you specified plus information + about the job that is created. + + If you have specified more than one output for your jobs (for + example, one output for the Kindle Fire and another output for + the Apple iPhone 4s), you currently must use the Elastic + Transcoder API to list the jobs (as opposed to the AWS + Console). + + :type pipeline_id: string + :param pipeline_id: The `Id` of the pipeline that you want Elastic + Transcoder to use for transcoding. The pipeline determines several + settings, including the Amazon S3 bucket from which Elastic + Transcoder gets the files to transcode and the bucket into which + Elastic Transcoder puts the transcoded files. + + :type input_name: dict + :param input_name: A section of the request body that provides + information about the file that is being transcoded. + + :type output: dict + :param output: The `CreateJobOutput` structure. + + :type outputs: list + :param outputs: A section of the request body that provides information + about the transcoded (target) files. We recommend that you use the + `Outputs` syntax instead of the `Output` syntax. + + :type output_key_prefix: string + :param output_key_prefix: The value, if any, that you want Elastic + Transcoder to prepend to the names of all files that this job + creates, including output files, thumbnails, and playlists. + + :type playlists: list + :param playlists: If you specify a preset in `PresetId` for which the + value of `Container` is ts (MPEG-TS), Playlists contains + information about the master playlists that you want Elastic + Transcoder to create. + We recommend that you create only one master playlist. 
The maximum + number of master playlists in a job is 30. + + """ + uri = '/2012-09-25/jobs' + params = {} + if pipeline_id is not None: + params['PipelineId'] = pipeline_id + if input_name is not None: + params['Input'] = input_name + if output is not None: + params['Output'] = output + if outputs is not None: + params['Outputs'] = outputs + if output_key_prefix is not None: + params['OutputKeyPrefix'] = output_key_prefix + if playlists is not None: + params['Playlists'] = playlists + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def create_pipeline(self, name=None, input_bucket=None, + output_bucket=None, role=None, notifications=None, + content_config=None, thumbnail_config=None): + """ + The CreatePipeline operation creates a pipeline with settings + that you specify. + + :type name: string + :param name: The name of the pipeline. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + Constraints: Maximum 40 characters. + + :type input_bucket: string + :param input_bucket: The Amazon S3 bucket in which you saved the media + files that you want to transcode. + + :type output_bucket: string + :param output_bucket: The Amazon S3 bucket in which you want Elastic + Transcoder to save the transcoded files. (Use this, or use + ContentConfig:Bucket plus ThumbnailConfig:Bucket.) + Specify this value when all of the following are true: + + + You want to save transcoded files, thumbnails (if any), and playlists + (if any) together in one bucket. + + You do not want to specify the users or groups who have access to the + transcoded files, thumbnails, and playlists. + + You do not want to specify the permissions that Elastic Transcoder + grants to the files. When Elastic Transcoder saves files in + `OutputBucket`, it grants full control over the files only to the + AWS account that owns the role that is specified by `Role`. + + You want to associate the transcoded files and thumbnails with the + Amazon S3 Standard storage class. + + + + If you want to save transcoded files and playlists in one bucket and + thumbnails in another bucket, specify which users can access the + transcoded files or the permissions the users have, or change the + Amazon S3 storage class, omit `OutputBucket` and specify values for + `ContentConfig` and `ThumbnailConfig` instead. + + :type role: string + :param role: The IAM Amazon Resource Name (ARN) for the role that you + want Elastic Transcoder to use to create the pipeline. + + :type notifications: dict + :param notifications: + The Amazon Simple Notification Service (Amazon SNS) topic that you want + to notify to report job status. + To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + + **Progressing**: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when Elastic + Transcoder has started to process a job in this pipeline. This is + the ARN that Amazon SNS returned when you created the topic. For + more information, see Create a Topic in the Amazon Simple + Notification Service Developer Guide. + + **Completed**: The topic ARN for the Amazon SNS topic that you want + to notify when Elastic Transcoder has finished processing a job in + this pipeline. This is the ARN that Amazon SNS returned when you + created the topic. 
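Putting `create_job` together: the `input_name` and `output` arguments are passed through as dicts and serialized to JSON, so their keys are the REST API's, not this client's. A minimal sketch; the pipeline ID is a placeholder, `'1351620000001-000010'` is commonly cited as a system preset but should be verified with `list_presets`, and additional Input/Output fields may be needed for some media:

```python
import boto.elastictranscoder

et = boto.elastictranscoder.connect_to_region('us-east-1')
job = et.create_job(
    pipeline_id='1111111111111-abcde1',           # placeholder pipeline ID
    input_name={'Key': 'inputs/source.mp4'},      # REST 'Input' structure
    output={'Key': 'outputs/source-720p.mp4',
            'PresetId': '1351620000001-000010'})  # assumed system preset
```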
+ + **Warning**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters a warning condition while + processing a job in this pipeline. This is the ARN that Amazon SNS + returned when you created the topic. + + **Error**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters an error condition while + processing a job in this pipeline. This is the ARN that Amazon SNS + returned when you created the topic. + + :type content_config: dict + :param content_config: + The optional `ContentConfig` object specifies information about the + Amazon S3 bucket in which you want Elastic Transcoder to save + transcoded files and playlists: which bucket to use, which users + you want to have access to the files, the type of access you want + users to have, and the storage class that you want to assign to the + files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig`. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save transcoded files and playlists. + + **Permissions** (Optional): The Permissions object specifies which + users you want to have access to transcoded files and the type of + access you want them to have. You can grant permissions to a + maximum of 30 users and/or predefined Amazon S3 groups. + + **Grantee Type**: Specify the type of value that appears in the + `Grantee` object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. For more information about + canonical user IDs, see Access Control List (ACL) Overview in the + Amazon Simple Storage Service Developer Guide. For more information + about using CloudFront origin access identities to require that + users use CloudFront URLs instead of Amazon S3 URLs, see Using an + Origin Access Identity to Restrict Access to Your Amazon S3 + Content. A canonical user ID is not the same as an AWS account + number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + transcoded files and playlists. To identify the user or group, you + can specify the canonical user ID for an AWS account, an origin + access identity for a CloudFront distribution, the registered email + address of an AWS account, or a predefined Amazon S3 group + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the files + that Elastic Transcoder adds to the bucket, including playlists and + video files. Valid values include: + + + `READ`: The grantee can read the objects and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the objects that + Elastic Transcoder adds to the Amazon S3 bucket. 
+ + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the objects that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the video files and playlists that it stores in your Amazon S3 + bucket. + + :type thumbnail_config: dict + :param thumbnail_config: + The `ThumbnailConfig` object specifies several values, including the + Amazon S3 bucket in which you want Elastic Transcoder to save + thumbnail files, which users you want to have access to the files, + the type of access you want users to have, and the storage class + that you want to assign to the files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig` even if you don't want to create thumbnails. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. + + **Permissions** (Optional): The `Permissions` object specifies which + users and/or predefined Amazon S3 groups you want to have access to + thumbnail files, and the type of access you want them to have. You + can grant permissions to a maximum of 30 users and/or predefined + Amazon S3 groups. + + **GranteeType**: Specify the type of value that appears in the + Grantee object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. A canonical user ID is not + the same as an AWS account number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + thumbnail files. To identify the user or group, you can specify the + canonical user ID for an AWS account, an origin access identity for + a CloudFront distribution, the registered email address of an AWS + account, or a predefined Amazon S3 group. + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the + thumbnail files that Elastic Transcoder adds to the bucket. Valid + values include: + + + `READ`: The grantee can read the thumbnails and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the thumbnails that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the thumbnails that it stores in your Amazon S3 bucket. 
+ + """ + uri = '/2012-09-25/pipelines' + params = {} + if name is not None: + params['Name'] = name + if input_bucket is not None: + params['InputBucket'] = input_bucket + if output_bucket is not None: + params['OutputBucket'] = output_bucket + if role is not None: + params['Role'] = role + if notifications is not None: + params['Notifications'] = notifications + if content_config is not None: + params['ContentConfig'] = content_config + if thumbnail_config is not None: + params['ThumbnailConfig'] = thumbnail_config + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def create_preset(self, name=None, description=None, container=None, + video=None, audio=None, thumbnails=None): + """ + The CreatePreset operation creates a preset with settings that + you specify. + Elastic Transcoder checks the CreatePreset settings to ensure + that they meet Elastic Transcoder requirements and to + determine whether they comply with H.264 standards. If your + settings are not valid for Elastic Transcoder, Elastic + Transcoder returns an HTTP 400 response ( + `ValidationException`) and does not create the preset. If the + settings are valid for Elastic Transcoder but aren't strictly + compliant with the H.264 standard, Elastic Transcoder creates + the preset and returns a warning message in the response. This + helps you determine whether your settings comply with the + H.264 standard while giving you greater flexibility with + respect to the video that Elastic Transcoder produces. + Elastic Transcoder uses the H.264 video-compression format. + For more information, see the International Telecommunication + Union publication Recommendation ITU-T H.264: Advanced video + coding for generic audiovisual services . + + :type name: string + :param name: The name of the preset. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + + :type description: string + :param description: A description of the preset. + + :type container: string + :param container: The container type for the output file. Valid values + include `mp3`, `mp4`, `ogg`, `ts`, and `webm`. + + :type video: dict + :param video: A section of the request body that specifies the video + parameters. + + :type audio: dict + :param audio: A section of the request body that specifies the audio + parameters. + + :type thumbnails: dict + :param thumbnails: A section of the request body that specifies the + thumbnail parameters, if any. + + """ + uri = '/2012-09-25/presets' + params = {} + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if container is not None: + params['Container'] = container + if video is not None: + params['Video'] = video + if audio is not None: + params['Audio'] = audio + if thumbnails is not None: + params['Thumbnails'] = thumbnails + return self.make_request('POST', uri, expected_status=201, + data=json.dumps(params)) + + def delete_pipeline(self, id=None): + """ + The DeletePipeline operation removes a pipeline. + + You can only delete a pipeline that has never been used or + that is not currently in use (doesn't contain any active + jobs). If the pipeline is currently in use, `DeletePipeline` + returns an error. + + :type id: string + :param id: The identifier of the pipeline that you want to delete. 
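For `create_pipeline` the mutually exclusive bucket options are the main trap: either `OutputBucket` alone, or `ContentConfig` plus `ThumbnailConfig`. The simplest single-bucket form looks roughly like this; the bucket names, role ARN, and the `'Pipeline'`/`'Id'` response keys are assumptions, with `et` as in the earlier sketch:

```python
resp = et.create_pipeline(
    name='my-pipeline',                # maximum 40 characters
    input_bucket='my-input-bucket',
    output_bucket='my-output-bucket',  # omit when using ContentConfig
    role='arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role')
pipeline_id = resp['Pipeline']['Id']   # assumed response envelope
```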
+ + """ + uri = '/2012-09-25/pipelines/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def delete_preset(self, id=None): + """ + The DeletePreset operation removes a preset that you've added + in an AWS region. + + You can't delete the default presets that are included with + Elastic Transcoder. + + :type id: string + :param id: The identifier of the preset for which you want to get + detailed information. + + """ + uri = '/2012-09-25/presets/{0}'.format(id) + return self.make_request('DELETE', uri, expected_status=202) + + def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None, + page_token=None): + """ + The ListJobsByPipeline operation gets a list of the jobs + currently in a pipeline. + + Elastic Transcoder returns all of the jobs currently in the + specified pipeline. The response body contains one element for + each job that satisfies the search criteria. + + :type pipeline_id: string + :param pipeline_id: The ID of the pipeline for which you want to get + job information. + + :type ascending: string + :param ascending: To list jobs in chronological order by the date and + time that they were submitted, enter `True`. To list jobs in + reverse chronological order, enter `False`. + + :type page_token: string + :param page_token: When Elastic Transcoder returns more than one page + of results, use `pageToken` in subsequent `GET` requests to get + each successive page of results. + + """ + uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id) + params = {} + if pipeline_id is not None: + params['PipelineId'] = pipeline_id + if ascending is not None: + params['Ascending'] = ascending + if page_token is not None: + params['PageToken'] = page_token + return self.make_request('GET', uri, expected_status=200, + params=params) + + def list_jobs_by_status(self, status=None, ascending=None, + page_token=None): + """ + The ListJobsByStatus operation gets a list of jobs that have a + specified status. The response body contains one element for + each job that satisfies the search criteria. + + :type status: string + :param status: To get information about all of the jobs associated with + the current AWS account that have a given status, specify the + following status: `Submitted`, `Progressing`, `Complete`, + `Canceled`, or `Error`. + + :type ascending: string + :param ascending: To list jobs in chronological order by the date and + time that they were submitted, enter `True`. To list jobs in + reverse chronological order, enter `False`. + + :type page_token: string + :param page_token: When Elastic Transcoder returns more than one page + of results, use `pageToken` in subsequent `GET` requests to get + each successive page of results. + + """ + uri = '/2012-09-25/jobsByStatus/{0}'.format(status) + params = {} + if status is not None: + params['Status'] = status + if ascending is not None: + params['Ascending'] = ascending + if page_token is not None: + params['PageToken'] = page_token + return self.make_request('GET', uri, expected_status=200, + params=params) + + def list_pipelines(self, ascending=None, page_token=None): + """ + The ListPipelines operation gets a list of the pipelines + associated with the current AWS account. + + :type ascending: string + :param ascending: To list pipelines in chronological order by the date + and time that they were created, enter `True`. To list pipelines in + reverse chronological order, enter `False`. 
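The `List*` operations page with `pageToken` rather than the `Marker` used by the ElastiCache client above. A drain loop, under the assumption that responses carry the `'Jobs'` and `'NextPageToken'` keys of the Elastic Transcoder REST shapes:

```python
token = None
while True:
    page = et.list_jobs_by_status(status='Complete', page_token=token)
    for job in page.get('Jobs', []):     # response keys assumed from the
        print(job['Id'], job['Status'])  # Elastic Transcoder REST API
    token = page.get('NextPageToken')
    if not token:
        break
```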
+
+        :type page_token: string
+        :param page_token: When Elastic Transcoder returns more than one page
+            of results, use `pageToken` in subsequent `GET` requests to get
+            each successive page of results.
+
+        """
+        uri = '/2012-09-25/pipelines'
+        params = {}
+        if ascending is not None:
+            params['Ascending'] = ascending
+        if page_token is not None:
+            params['PageToken'] = page_token
+        return self.make_request('GET', uri, expected_status=200,
+                                 params=params)
+
+    def list_presets(self, ascending=None, page_token=None):
+        """
+        The ListPresets operation gets a list of the default presets
+        included with Elastic Transcoder and the presets that you've
+        added in an AWS region.
+
+        :type ascending: string
+        :param ascending: To list presets in chronological order by the date
+            and time that they were created, enter `True`. To list presets in
+            reverse chronological order, enter `False`.
+
+        :type page_token: string
+        :param page_token: When Elastic Transcoder returns more than one page
+            of results, use `pageToken` in subsequent `GET` requests to get
+            each successive page of results.
+
+        """
+        uri = '/2012-09-25/presets'
+        params = {}
+        if ascending is not None:
+            params['Ascending'] = ascending
+        if page_token is not None:
+            params['PageToken'] = page_token
+        return self.make_request('GET', uri, expected_status=200,
+                                 params=params)
+
+    def read_job(self, id=None):
+        """
+        The ReadJob operation returns detailed information about a
+        job.
+
+        :type id: string
+        :param id: The identifier of the job for which you want to get detailed
+            information.
+
+        """
+        uri = '/2012-09-25/jobs/{0}'.format(id)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def read_pipeline(self, id=None):
+        """
+        The ReadPipeline operation gets detailed information about a
+        pipeline.
+
+        :type id: string
+        :param id: The identifier of the pipeline to read.
+
+        """
+        uri = '/2012-09-25/pipelines/{0}'.format(id)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def read_preset(self, id=None):
+        """
+        The ReadPreset operation gets detailed information about a
+        preset.
+
+        :type id: string
+        :param id: The identifier of the preset for which you want to get
+            detailed information.
+
+        """
+        uri = '/2012-09-25/presets/{0}'.format(id)
+        return self.make_request('GET', uri, expected_status=200)
+
+    def test_role(self, role=None, input_bucket=None, output_bucket=None,
+                  topics=None):
+        """
+        The TestRole operation tests the IAM role used to create the
+        pipeline.
+
+        The `TestRole` action lets you determine whether the IAM role
+        you are using has sufficient permissions to let Elastic
+        Transcoder perform tasks associated with the transcoding
+        process. The action attempts to assume the specified IAM role,
+        checks read access to the input and output buckets, and tries
+        to send a test notification to Amazon SNS topics that you
+        specify.
+
+        :type role: string
+        :param role: The IAM Amazon Resource Name (ARN) for the role that you
+            want Elastic Transcoder to test.
+
+        :type input_bucket: string
+        :param input_bucket: The Amazon S3 bucket that contains media files to
+            be transcoded. The action attempts to read from this bucket.
+
+        :type output_bucket: string
+        :param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
+            write transcoded media files to. The action attempts to read from
+            this bucket.
+ + :type topics: list + :param topics: The ARNs of one or more Amazon Simple Notification + Service (Amazon SNS) topics that you want the action to send a test + notification to. + + """ + uri = '/2012-09-25/roleTests' + params = {} + if role is not None: + params['Role'] = role + if input_bucket is not None: + params['InputBucket'] = input_bucket + if output_bucket is not None: + params['OutputBucket'] = output_bucket + if topics is not None: + params['Topics'] = topics + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline(self, id, name=None, input_bucket=None, role=None, + notifications=None, content_config=None, + thumbnail_config=None): + """ + Use the `UpdatePipeline` operation to update settings for a + pipeline. When you change pipeline settings, your changes take + effect immediately. Jobs that you have already submitted and + that Elastic Transcoder has not started to process are + affected in addition to jobs that you submit after you change + settings. + + :type id: string + :param id: The ID of the pipeline that you want to update. + + :type name: string + :param name: The name of the pipeline. We recommend that the name be + unique within the AWS account, but uniqueness is not enforced. + Constraints: Maximum 40 characters + + :type input_bucket: string + :param input_bucket: The Amazon S3 bucket in which you saved the media + files that you want to transcode and the graphics that you want to + use as watermarks. + + :type role: string + :param role: The IAM Amazon Resource Name (ARN) for the role that you + want Elastic Transcoder to use to transcode jobs for this pipeline. + + :type notifications: dict + :param notifications: + The Amazon Simple Notification Service (Amazon SNS) topic or topics to + notify in order to report job status. + To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + :type content_config: dict + :param content_config: + The optional `ContentConfig` object specifies information about the + Amazon S3 bucket in which you want Elastic Transcoder to save + transcoded files and playlists: which bucket to use, which users + you want to have access to the files, the type of access you want + users to have, and the storage class that you want to assign to the + files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig`. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save transcoded files and playlists. + + **Permissions** (Optional): The Permissions object specifies which + users you want to have access to transcoded files and the type of + access you want them to have. You can grant permissions to a + maximum of 30 users and/or predefined Amazon S3 groups. + + **Grantee Type**: Specify the type of value that appears in the + `Grantee` object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. For more information about + canonical user IDs, see Access Control List (ACL) Overview in the + Amazon Simple Storage Service Developer Guide. 
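`test_role` is worth calling before the first `create_job`, since it exercises the role, both buckets, and the SNS topics in one shot. A sketch with placeholder ARNs and bucket names; the REST API appears to report `Success` as the string `'true'`/`'false'` rather than a boolean, which is an assumption worth verifying:

```python
result = et.test_role(
    role='arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role',
    input_bucket='my-input-bucket',
    output_bucket='my-output-bucket',
    topics=['arn:aws:sns:us-east-1:123456789012:transcode-status'])
if result.get('Success') != 'true':   # string, not boolean (assumed)
    print(result.get('Messages'))
```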
For more information + about using CloudFront origin access identities to require that + users use CloudFront URLs instead of Amazon S3 URLs, see Using an + Origin Access Identity to Restrict Access to Your Amazon S3 + Content. A canonical user ID is not the same as an AWS account + number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. + + + **Grantee**: The AWS user or group that you want to have access to + transcoded files and playlists. To identify the user or group, you + can specify the canonical user ID for an AWS account, an origin + access identity for a CloudFront distribution, the registered email + address of an AWS account, or a predefined Amazon S3 group + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the files + that Elastic Transcoder adds to the bucket, including playlists and + video files. Valid values include: + + + `READ`: The grantee can read the objects and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the objects that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the objects that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the video files and playlists that it stores in your Amazon S3 + bucket. + + :type thumbnail_config: dict + :param thumbnail_config: + The `ThumbnailConfig` object specifies several values, including the + Amazon S3 bucket in which you want Elastic Transcoder to save + thumbnail files, which users you want to have access to the files, + the type of access you want users to have, and the storage class + that you want to assign to the files. + + If you specify values for `ContentConfig`, you must also specify values + for `ThumbnailConfig` even if you don't want to create thumbnails. + + If you specify values for `ContentConfig` and `ThumbnailConfig`, omit + the `OutputBucket` object. + + + + **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder + to save thumbnail files. + + **Permissions** (Optional): The `Permissions` object specifies which + users and/or predefined Amazon S3 groups you want to have access to + thumbnail files, and the type of access you want them to have. You + can grant permissions to a maximum of 30 users and/or predefined + Amazon S3 groups. + + **GranteeType**: Specify the type of value that appears in the + Grantee object: + + + **Canonical**: The value in the `Grantee` object is either the + canonical user ID for an AWS account or an origin access identity + for an Amazon CloudFront distribution. A canonical user ID is not + the same as an AWS account number. + + **Email**: The value in the `Grantee` object is the registered email + address of an AWS account. + + **Group**: The value in the `Grantee` object is one of the following + predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or + `LogDelivery`. 
+ + + **Grantee**: The AWS user or group that you want to have access to + thumbnail files. To identify the user or group, you can specify the + canonical user ID for an AWS account, an origin access identity for + a CloudFront distribution, the registered email address of an AWS + account, or a predefined Amazon S3 group. + + **Access**: The permission that you want to give to the AWS user that + you specified in `Grantee`. Permissions are granted on the + thumbnail files that Elastic Transcoder adds to the bucket. Valid + values include: + + + `READ`: The grantee can read the thumbnails and metadata for objects + that Elastic Transcoder adds to the Amazon S3 bucket. + + `READ_ACP`: The grantee can read the object ACL for thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `WRITE_ACP`: The grantee can write the ACL for the thumbnails that + Elastic Transcoder adds to the Amazon S3 bucket. + + `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP` + permissions for the thumbnails that Elastic Transcoder adds to the + Amazon S3 bucket. + + + **StorageClass**: The Amazon S3 storage class, `Standard` or + `ReducedRedundancy`, that you want Elastic Transcoder to assign to + the thumbnails that it stores in your Amazon S3 bucket. + + """ + uri = '/2012-09-25/pipelines/{0}'.format(id) + params = {} + if name is not None: + params['Name'] = name + if input_bucket is not None: + params['InputBucket'] = input_bucket + if role is not None: + params['Role'] = role + if notifications is not None: + params['Notifications'] = notifications + if content_config is not None: + params['ContentConfig'] = content_config + if thumbnail_config is not None: + params['ThumbnailConfig'] = thumbnail_config + return self.make_request('PUT', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline_notifications(self, id=None, notifications=None): + """ + With the UpdatePipelineNotifications operation, you can update + Amazon Simple Notification Service (Amazon SNS) notifications + for a pipeline. + + When you update notifications for a pipeline, Elastic + Transcoder returns the values that you specified in the + request. + + :type id: string + :param id: The identifier of the pipeline for which you want to change + notification settings. + + :type notifications: dict + :param notifications: + The topic ARN for the Amazon Simple Notification Service (Amazon SNS) + topic that you want to notify to report job status. + To receive notifications, you must also subscribe to the new topic in + the Amazon SNS console. + + + **Progressing**: The topic ARN for the Amazon Simple Notification + Service (Amazon SNS) topic that you want to notify when Elastic + Transcoder has started to process jobs that are added to this + pipeline. This is the ARN that Amazon SNS returned when you created + the topic. + + **Completed**: The topic ARN for the Amazon SNS topic that you want + to notify when Elastic Transcoder has finished processing a job. + This is the ARN that Amazon SNS returned when you created the + topic. + + **Warning**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters a warning condition. This + is the ARN that Amazon SNS returned when you created the topic. + + **Error**: The topic ARN for the Amazon SNS topic that you want to + notify when Elastic Transcoder encounters an error condition. This + is the ARN that Amazon SNS returned when you created the topic. 
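`update_pipeline` only sends the keys you pass, so a rename is a one-field request; anything left as `None` is simply omitted from the JSON body. Sketch, with a placeholder pipeline ID:

```python
# Only 'Name' lands in the request body; every other setting is left
# untouched because None-valued parameters are skipped when building params.
et.update_pipeline(id='1111111111111-abcde1',
                   name='my-pipeline-v2')
```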
+ + """ + uri = '/2012-09-25/pipelines/{0}/notifications'.format(id) + params = {} + if id is not None: + params['Id'] = id + if notifications is not None: + params['Notifications'] = notifications + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def update_pipeline_status(self, id=None, status=None): + """ + The UpdatePipelineStatus operation pauses or reactivates a + pipeline, so that the pipeline stops or restarts the + processing of jobs. + + Changing the pipeline status is useful if you want to cancel + one or more jobs. You can't cancel jobs after Elastic + Transcoder has started processing them; if you pause the + pipeline to which you submitted the jobs, you have more time + to get the job IDs for the jobs that you want to cancel, and + to send a CancelJob request. + + :type id: string + :param id: The identifier of the pipeline to update. + + :type status: string + :param status: + The desired status of the pipeline: + + + + `Active`: The pipeline is processing jobs. + + `Paused`: The pipeline is not currently processing jobs. + + """ + uri = '/2012-09-25/pipelines/{0}/status'.format(id) + params = {} + if id is not None: + params['Id'] = id + if status is not None: + params['Status'] = status + return self.make_request('POST', uri, expected_status=200, + data=json.dumps(params)) + + def make_request(self, verb, resource, headers=None, data='', + expected_status=None, params=None): + if headers is None: + headers = {} + response = super(ElasticTranscoderConnection, self).make_request( + verb, resource, headers=headers, data=data, params=params) + body = json.loads(response.read().decode('utf-8')) + if response.status == expected_status: + return body + else: + error_type = response.getheader('x-amzn-ErrorType').split(':')[0] + error_class = self._faults.get(error_type, self.ResponseError) + raise error_class(response.status, response.reason, body) diff --git a/ext/boto/emr/__init__.py b/ext/boto/emr/__init__.py new file mode 100644 index 0000000000..dfa53c7337 --- /dev/null +++ b/ext/boto/emr/__init__.py @@ -0,0 +1,48 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module provies an interface to the Elastic MapReduce (EMR) +service from AWS. 
+""" +from boto.emr.connection import EmrConnection +from boto.emr.step import Step, StreamingStep, JarStep +from boto.emr.bootstrap_action import BootstrapAction +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Elastic MapReduce service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + return get_regions('elasticmapreduce', connection_cls=EmrConnection) + + +def connect_to_region(region_name, **kw_params): + return connect('elasticmapreduce', region_name, + connection_cls=EmrConnection, **kw_params) diff --git a/ext/boto/emr/bootstrap_action.py b/ext/boto/emr/bootstrap_action.py new file mode 100644 index 0000000000..5a01fd21cc --- /dev/null +++ b/ext/boto/emr/bootstrap_action.py @@ -0,0 +1,46 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.compat import six + +class BootstrapAction(object): + def __init__(self, name, path, bootstrap_action_args): + self.name = name + self.path = path + + if isinstance(bootstrap_action_args, six.string_types): + bootstrap_action_args = [bootstrap_action_args] + + self.bootstrap_action_args = bootstrap_action_args + + def args(self): + args = [] + + if self.bootstrap_action_args: + args.extend(self.bootstrap_action_args) + + return args + + def __repr__(self): + return '%s.%s(name=%r, path=%r, bootstrap_action_args=%r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.path, self.bootstrap_action_args) diff --git a/ext/boto/emr/connection.py b/ext/boto/emr/connection.py new file mode 100644 index 0000000000..7afc4e07d1 --- /dev/null +++ b/ext/boto/emr/connection.py @@ -0,0 +1,765 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010-2011 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EMR service
+"""
+import types
+
+import boto
+import boto.utils
+from boto.ec2.regioninfo import RegionInfo
+from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
+    Cluster, ClusterSummaryList, HadoopStep, \
+    InstanceGroupList, InstanceList, JobFlow, \
+    JobFlowStepList, \
+    ModifyInstanceGroupsResponse, \
+    RunJobFlowResponse, StepSummaryList
+from boto.emr.step import JarStep
+from boto.connection import AWSQueryConnection
+from boto.exception import EmrResponseError
+from boto.compat import six
+
+
+class EmrConnection(AWSQueryConnection):
+
+    APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
+    DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
+    DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
+                                            'elasticmapreduce.us-east-1.amazonaws.com')
+    ResponseError = EmrResponseError
+
+
+
+    # Constants for AWS Console debugging
+    DebuggingJar = 's3://{region_name}.elasticmapreduce/libs/script-runner/script-runner.jar'
+    DebuggingArgs = 's3://{region_name}.elasticmapreduce/libs/state-pusher/0.1/fetch'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True, profile_name=None):
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        self.region = region
+        super(EmrConnection, self).__init__(aws_access_key_id,
+                                            aws_secret_access_key,
+                                            is_secure, port, proxy, proxy_port,
+                                            proxy_user, proxy_pass,
+                                            self.region.endpoint, debug,
+                                            https_connection_factory, path,
+                                            security_token,
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
+        # Many of the EMR hostnames are of the form:
+        #     <region>.<service_name>.amazonaws.com
+        # rather than the more common:
+        #     <service_name>.<region>.amazonaws.com
+        # so we need to explicitly set the region_name and service_name
+        # for the SigV4 signing.
+        self.auth_region_name = self.region.name
+        self.auth_service_name = 'elasticmapreduce'
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def describe_cluster(self, cluster_id):
+        """
+        Describes an Elastic MapReduce cluster
+
+        :type cluster_id: str
+        :param cluster_id: The cluster id of interest
+        """
+        params = {
+            'ClusterId': cluster_id
+        }
+        return self.get_object('DescribeCluster', params, Cluster)
+
+    def describe_jobflow(self, jobflow_id):
+        """
+        This method is deprecated. We recommend you use list_clusters,
+        describe_cluster, list_steps, list_instance_groups and
+        list_bootstrap_actions instead.
+
+        Describes a single Elastic MapReduce job flow
+
+        :type jobflow_id: str
+        :param jobflow_id: The job flow id of interest
+        """
+        jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
+        if jobflows:
+            return jobflows[0]
+
+    def describe_jobflows(self, states=None, jobflow_ids=None,
+                          created_after=None, created_before=None):
+        """
+        This method is deprecated.
We recommend you use list_clusters, + describe_cluster, list_steps, list_instance_groups and + list_bootstrap_actions instead. + + Retrieve all the Elastic MapReduce job flows on your account + + :type states: list + :param states: A list of strings with job flow states wanted + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + :type created_after: datetime + :param created_after: Bound on job flow creation time + + :type created_before: datetime + :param created_before: Bound on job flow creation time + """ + params = {} + + if states: + self.build_list_params(params, states, 'JobFlowStates.member') + if jobflow_ids: + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + if created_after: + params['CreatedAfter'] = created_after.strftime( + boto.utils.ISO8601) + if created_before: + params['CreatedBefore'] = created_before.strftime( + boto.utils.ISO8601) + + return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) + + def describe_step(self, cluster_id, step_id): + """ + Describe an Elastic MapReduce step + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type step_id: str + :param step_id: The step id of interest + """ + params = { + 'ClusterId': cluster_id, + 'StepId': step_id + } + + return self.get_object('DescribeStep', params, HadoopStep) + + def list_bootstrap_actions(self, cluster_id, marker=None): + """ + Get a list of bootstrap actions for an Elastic MapReduce cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListBootstrapActions', params, BootstrapActionList) + + def list_clusters(self, created_after=None, created_before=None, + cluster_states=None, marker=None): + """ + List Elastic MapReduce clusters with optional filtering + + :type created_after: datetime + :param created_after: Bound on cluster creation time + :type created_before: datetime + :param created_before: Bound on cluster creation time + :type cluster_states: list + :param cluster_states: Bound on cluster states + :type marker: str + :param marker: Pagination marker + """ + params = {} + if created_after: + params['CreatedAfter'] = created_after.strftime( + boto.utils.ISO8601) + if created_before: + params['CreatedBefore'] = created_before.strftime( + boto.utils.ISO8601) + if marker: + params['Marker'] = marker + + if cluster_states: + self.build_list_params(params, cluster_states, 'ClusterStates.member') + + return self.get_object('ListClusters', params, ClusterSummaryList) + + def list_instance_groups(self, cluster_id, marker=None): + """ + List EC2 instance groups in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListInstanceGroups', params, InstanceGroupList) + + def list_instances(self, cluster_id, instance_group_id=None, + instance_group_types=None, marker=None): + """ + List EC2 instances in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type instance_group_id: str + :param instance_group_id: The EC2 instance group id of interest + :type instance_group_types: list + :param instance_group_types: Filter by EC2 instance group type + :type marker: str + :param marker: Pagination 
marker + """ + params = { + 'ClusterId': cluster_id + } + + if instance_group_id: + params['InstanceGroupId'] = instance_group_id + if marker: + params['Marker'] = marker + + if instance_group_types: + self.build_list_params(params, instance_group_types, + 'InstanceGroupTypes.member') + + return self.get_object('ListInstances', params, InstanceList) + + def list_steps(self, cluster_id, step_states=None, marker=None): + """ + List cluster steps + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type step_states: list + :param step_states: Filter by step states + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + if step_states: + self.build_list_params(params, step_states, 'StepStates.member') + + return self.get_object('ListSteps', params, StepSummaryList) + + def add_tags(self, resource_id, tags): + """ + Create new metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: dict + :param tags: A dictionary containing the name/value pairs. + If you want to create only a tag name, the + value for that tag should be the empty string + (e.g. '') or None. + """ + assert isinstance(resource_id, six.string_types) + params = { + 'ResourceId': resource_id, + } + params.update(self._build_tag_list(tags)) + return self.get_status('AddTags', params, verb='POST') + + def remove_tags(self, resource_id, tags): + """ + Remove metadata tags for the specified resource id. + + :type resource_id: str + :param resource_id: The cluster id + + :type tags: list + :param tags: A list of tag names to remove. + """ + params = { + 'ResourceId': resource_id, + } + params.update(self._build_string_list('TagKeys', tags)) + return self.get_status('RemoveTags', params, verb='POST') + + def terminate_jobflow(self, jobflow_id): + """ + Terminate an Elastic MapReduce job flow + + :type jobflow_id: str + :param jobflow_id: A jobflow id + """ + self.terminate_jobflows([jobflow_id]) + + def terminate_jobflows(self, jobflow_ids): + """ + Terminate an Elastic MapReduce job flow + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + """ + params = {} + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + return self.get_status('TerminateJobFlows', params, verb='POST') + + def add_jobflow_steps(self, jobflow_id, steps): + """ + Adds steps to a jobflow + + :type jobflow_id: str + :param jobflow_id: The job flow id + :type steps: list(boto.emr.Step) + :param steps: A list of steps to add to the job + """ + if not isinstance(steps, list): + steps = [steps] + params = {} + params['JobFlowId'] = jobflow_id + + # Step args + step_args = [self._build_step_args(step) for step in steps] + params.update(self._build_step_list(step_args)) + + return self.get_object( + 'AddJobFlowSteps', params, JobFlowStepList, verb='POST') + + def add_instance_groups(self, jobflow_id, instance_groups): + """ + Adds instance groups to a running cluster. 
+ + :type jobflow_id: str + :param jobflow_id: The id of the jobflow which will take the + new instance groups + + :type instance_groups: list(boto.emr.InstanceGroup) + :param instance_groups: A list of instance groups to add to the job + """ + if not isinstance(instance_groups, list): + instance_groups = [instance_groups] + params = {} + params['JobFlowId'] = jobflow_id + params.update(self._build_instance_group_list_args(instance_groups)) + + return self.get_object('AddInstanceGroups', params, + AddInstanceGroupsResponse, verb='POST') + + def modify_instance_groups(self, instance_group_ids, new_sizes): + """ + Modify the number of nodes and configuration settings in an + instance group. + + :type instance_group_ids: list(str) + :param instance_group_ids: A list of the ID's of the instance + groups to be modified + + :type new_sizes: list(int) + :param new_sizes: A list of the new sizes for each instance group + """ + if not isinstance(instance_group_ids, list): + instance_group_ids = [instance_group_ids] + if not isinstance(new_sizes, list): + new_sizes = [new_sizes] + + instance_groups = zip(instance_group_ids, new_sizes) + + params = {} + for k, ig in enumerate(instance_groups): + # could be wrong - the example amazon gives uses + # InstanceRequestCount, while the api documentation + # says InstanceCount + params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0] + params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1] + + return self.get_object('ModifyInstanceGroups', params, + ModifyInstanceGroupsResponse, verb='POST') + + def run_jobflow(self, name, log_uri=None, ec2_keyname=None, + availability_zone=None, + master_instance_type='m1.small', + slave_instance_type='m1.small', num_instances=1, + action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False, + enable_debugging=False, + hadoop_version=None, + steps=None, + bootstrap_actions=[], + instance_groups=None, + additional_info=None, + ami_version=None, + api_params=None, + visible_to_all_users=None, + job_flow_role=None, + service_role=None): + """ + Runs a job flow + :type name: str + :param name: Name of the job flow + + :type log_uri: str + :param log_uri: URI of the S3 bucket to place logs + + :type ec2_keyname: str + :param ec2_keyname: EC2 key used for the instances + + :type availability_zone: str + :param availability_zone: EC2 availability zone of the cluster + + :type master_instance_type: str + :param master_instance_type: EC2 instance type of the master + + :type slave_instance_type: str + :param slave_instance_type: EC2 instance type of the slave nodes + + :type num_instances: int + :param num_instances: Number of instances in the Hadoop cluster + + :type action_on_failure: str + :param action_on_failure: Action to take if a step terminates + + :type keep_alive: bool + :param keep_alive: Denotes whether the cluster should stay + alive upon completion + + :type enable_debugging: bool + :param enable_debugging: Denotes whether AWS console debugging + should be enabled. + + :type hadoop_version: str + :param hadoop_version: Version of Hadoop to use. This no longer + defaults to '0.20' and now uses the AMI default. + + :type steps: list(boto.emr.Step) + :param steps: List of steps to add with the job + + :type bootstrap_actions: list(boto.emr.BootstrapAction) + :param bootstrap_actions: List of bootstrap actions that run + before Hadoop starts. + + :type instance_groups: list(boto.emr.InstanceGroup) + :param instance_groups: Optional list of instance groups to + use when creating this job. 
+ NB: When provided, this argument supersedes num_instances + and master/slave_instance_type. + + :type ami_version: str + :param ami_version: Amazon Machine Image (AMI) version to use + for instances. Values accepted by EMR are '1.0', '2.0', and + 'latest'; EMR currently defaults to '1.0' if you don't set + 'ami_version'. + + :type additional_info: JSON str + :param additional_info: A JSON string for selecting additional features + + :type api_params: dict + :param api_params: a dictionary of additional parameters to pass + directly to the EMR API (so you don't have to upgrade boto to + use new EMR features). You can also delete an API parameter + by setting it to None. + + :type visible_to_all_users: bool + :param visible_to_all_users: Whether the job flow is visible to all IAM + users of the AWS account associated with the job flow. If this + value is set to ``True``, all IAM users of that AWS + account can view and (if they have the proper policy permissions + set) manage the job flow. If it is set to ``False``, only + the IAM user that created the job flow can view and manage + it. + + :type job_flow_role: str + :param job_flow_role: An IAM role for the job flow. The EC2 + instances of the job flow assume this role. The default role is + ``EMRJobflowDefault``. In order to use the default role, + you must have already created it using the CLI. + + :type service_role: str + :param service_role: The IAM role that will be assumed by the Amazon + EMR service to access AWS resources on your behalf. + + :rtype: str + :return: The jobflow id + """ + steps = steps or [] + params = {} + if action_on_failure: + params['ActionOnFailure'] = action_on_failure + if log_uri: + params['LogUri'] = log_uri + params['Name'] = name + + # Common instance args + common_params = self._build_instance_common_args(ec2_keyname, + availability_zone, + keep_alive, + hadoop_version) + params.update(common_params) + + # NB: according to the AWS API's error message, we must + # "configure instances either using instance count, master and + # slave instance type or instance groups but not both." + # + # Thus we switch here on the truthiness of instance_groups. 
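+        #
+        # As an illustrative sketch of the resulting query parameters
+        # (key names only, not literal request output):
+        #     instance_groups=None  -> Instances.MasterInstanceType,
+        #                              Instances.SlaveInstanceType,
+        #                              Instances.InstanceCount
+        #     instance_groups=[...] -> Instances.InstanceGroups.member.1.*,
+        #                              Instances.InstanceGroups.member.2.*, ...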
+        if not instance_groups:
+            # Instance args (the common case)
+            instance_params = self._build_instance_count_and_type_args(
+                master_instance_type,
+                slave_instance_type,
+                num_instances)
+            params.update(instance_params)
+        else:
+            # Instance group args (for spot instances or a heterogeneous cluster)
+            list_args = self._build_instance_group_list_args(instance_groups)
+            instance_params = dict(
+                ('Instances.%s' % k, v) for k, v in six.iteritems(list_args)
+            )
+            params.update(instance_params)
+
+        # Debugging step from EMR API docs
+        if enable_debugging:
+            debugging_step = JarStep(name='Setup Hadoop Debugging',
+                                     action_on_failure='TERMINATE_JOB_FLOW',
+                                     main_class=None,
+                                     jar=self.DebuggingJar.format(region_name=self.region.name),
+                                     step_args=self.DebuggingArgs.format(region_name=self.region.name))
+            steps.insert(0, debugging_step)
+
+        # Step args
+        if steps:
+            step_args = [self._build_step_args(step) for step in steps]
+            params.update(self._build_step_list(step_args))
+
+        if bootstrap_actions:
+            bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
+            params.update(self._build_bootstrap_action_list(bootstrap_action_args))
+
+        if ami_version:
+            params['AmiVersion'] = ami_version
+
+        if additional_info is not None:
+            params['AdditionalInfo'] = additional_info
+
+        if api_params:
+            for key, value in six.iteritems(api_params):
+                if value is None:
+                    params.pop(key, None)
+                else:
+                    params[key] = value
+
+        if visible_to_all_users is not None:
+            if visible_to_all_users:
+                params['VisibleToAllUsers'] = 'true'
+            else:
+                params['VisibleToAllUsers'] = 'false'
+
+        if job_flow_role is not None:
+            params['JobFlowRole'] = job_flow_role
+
+        if service_role is not None:
+            params['ServiceRole'] = service_role
+
+        response = self.get_object(
+            'RunJobFlow', params, RunJobFlowResponse, verb='POST')
+        return response.jobflowid
+
+    def set_termination_protection(self, jobflow_id,
+                                   termination_protection_status):
+        """
+        Set termination protection on specified Elastic MapReduce job flows
+
+        :type jobflow_ids: list or str
+        :param jobflow_ids: A list of job flow IDs
+
+        :type termination_protection_status: bool
+        :param termination_protection_status: Termination protection status
+        """
+        assert termination_protection_status in (True, False)
+
+        params = {}
+        params['TerminationProtected'] = (termination_protection_status and "true") or "false"
+        self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
+
+        return self.get_status('SetTerminationProtection', params, verb='POST')
+
+    def set_visible_to_all_users(self, jobflow_id, visibility):
+        """
+        Set whether specified Elastic Map Reduce job flows are visible to all IAM users
+
+        :type jobflow_ids: list or str
+        :param jobflow_ids: A list of job flow IDs
+
+        :type visibility: bool
+        :param visibility: Visibility
+        """
+        assert visibility in (True, False)
+
+        params = {}
+        params['VisibleToAllUsers'] = (visibility and "true") or "false"
+        self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
+
+        return self.get_status('SetVisibleToAllUsers', params, verb='POST')
+
+    def _build_bootstrap_action_args(self, bootstrap_action):
+        bootstrap_action_params = {}
+        bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
+
+        try:
+            bootstrap_action_params['Name'] = bootstrap_action.name
+        except AttributeError:
+            pass
+
+        args = bootstrap_action.args()
+        if args:
+            self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
+
+        return
bootstrap_action_params + + def _build_step_args(self, step): + step_params = {} + step_params['ActionOnFailure'] = step.action_on_failure + step_params['HadoopJarStep.Jar'] = step.jar() + + main_class = step.main_class() + if main_class: + step_params['HadoopJarStep.MainClass'] = main_class + + args = step.args() + if args: + self.build_list_params(step_params, args, 'HadoopJarStep.Args.member') + + step_params['Name'] = step.name + return step_params + + def _build_bootstrap_action_list(self, bootstrap_actions): + if not isinstance(bootstrap_actions, list): + bootstrap_actions = [bootstrap_actions] + + params = {} + for i, bootstrap_action in enumerate(bootstrap_actions): + for key, value in six.iteritems(bootstrap_action): + params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value + return params + + def _build_step_list(self, steps): + if not isinstance(steps, list): + steps = [steps] + + params = {} + for i, step in enumerate(steps): + for key, value in six.iteritems(step): + params['Steps.member.%s.%s' % (i+1, key)] = value + return params + + def _build_string_list(self, field, items): + if not isinstance(items, list): + items = [items] + + params = {} + for i, item in enumerate(items): + params['%s.member.%s' % (field, i + 1)] = item + return params + + def _build_tag_list(self, tags): + assert isinstance(tags, dict) + + params = {} + for i, key_value in enumerate(sorted(six.iteritems(tags)), start=1): + key, value = key_value + current_prefix = 'Tags.member.%s' % i + params['%s.Key' % current_prefix] = key + if value: + params['%s.Value' % current_prefix] = value + return params + + def _build_instance_common_args(self, ec2_keyname, availability_zone, + keep_alive, hadoop_version): + """ + Takes a number of parameters used when starting a jobflow (as + specified in run_jobflow() above). Returns a comparable dict for + use in making a RunJobFlow request. + """ + params = { + 'Instances.KeepJobFlowAliveWhenNoSteps': str(keep_alive).lower(), + } + + if hadoop_version: + params['Instances.HadoopVersion'] = hadoop_version + if ec2_keyname: + params['Instances.Ec2KeyName'] = ec2_keyname + if availability_zone: + params['Instances.Placement.AvailabilityZone'] = availability_zone + + return params + + def _build_instance_count_and_type_args(self, master_instance_type, + slave_instance_type, num_instances): + """ + Takes a master instance type (string), a slave instance type + (string), and a number of instances. Returns a comparable dict + for use in making a RunJobFlow request. + """ + params = {'Instances.MasterInstanceType': master_instance_type, + 'Instances.SlaveInstanceType': slave_instance_type, + 'Instances.InstanceCount': num_instances} + return params + + def _build_instance_group_args(self, instance_group): + """ + Takes an InstanceGroup; returns a dict that, when its keys are + properly prefixed, can be used for describing InstanceGroups in + RunJobFlow or AddInstanceGroups requests. + """ + params = {'InstanceCount': instance_group.num_instances, + 'InstanceRole': instance_group.role, + 'InstanceType': instance_group.type, + 'Name': instance_group.name, + 'Market': instance_group.market} + if instance_group.market == 'SPOT': + params['BidPrice'] = instance_group.bidprice + return params + + def _build_instance_group_list_args(self, instance_groups): + """ + Takes a list of InstanceGroups, or a single InstanceGroup. Returns + a comparable dict for use in making a RunJobFlow or AddInstanceGroups + request. 
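+
+        For example (an illustrative sketch of the generated key names
+        only), a single group expands to flat query parameters such as
+        InstanceGroups.member.1.InstanceRole,
+        InstanceGroups.member.1.InstanceType and
+        InstanceGroups.member.1.InstanceCount.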
+ """ + if not isinstance(instance_groups, list): + instance_groups = [instance_groups] + + params = {} + for i, instance_group in enumerate(instance_groups): + ig_dict = self._build_instance_group_args(instance_group) + for key, value in six.iteritems(ig_dict): + params['InstanceGroups.member.%d.%s' % (i+1, key)] = value + return params diff --git a/ext/boto/emr/emrobject.py b/ext/boto/emr/emrobject.py new file mode 100644 index 0000000000..73f7060ba1 --- /dev/null +++ b/ext/boto/emr/emrobject.py @@ -0,0 +1,511 @@ +# Copyright (c) 2010 Spotify AB +# Copyright (c) 2010 Jeremy Thurgood +# Copyright (c) 2010-2011 Yelp +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module contains EMR response objects +""" + +from boto.resultset import ResultSet + + +class EmrObject(object): + Fields = set() + + def __init__(self, connection=None): + self.connection = connection + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in self.Fields: + setattr(self, name.lower(), value) + + +class RunJobFlowResponse(EmrObject): + Fields = set(['JobFlowId']) + +class AddInstanceGroupsResponse(EmrObject): + Fields = set(['InstanceGroupIds', 'JobFlowId']) + +class ModifyInstanceGroupsResponse(EmrObject): + Fields = set(['RequestId']) + + +class Arg(EmrObject): + def __init__(self, connection=None): + self.value = None + + def endElement(self, name, value, connection): + self.value = value + + +class StepId(Arg): + pass + + +class SupportedProduct(Arg): + pass + + +class JobFlowStepList(EmrObject): + def __ini__(self, connection=None): + self.connection = connection + self.stepids = None + + def startElement(self, name, attrs, connection): + if name == 'StepIds': + self.stepids = ResultSet([('member', StepId)]) + return self.stepids + else: + return None + + +class BootstrapAction(EmrObject): + Fields = set([ + 'Args', + 'Name', + 'Path', + 'ScriptPath', + ]) + + def startElement(self, name, attrs, connection): + if name == 'Args': + self.args = ResultSet([('member', Arg)]) + return self.args + + +class KeyValue(EmrObject): + Fields = set([ + 'Key', + 'Value', + ]) + + +class Step(EmrObject): + Fields = set([ + 'ActionOnFailure', + 'CreationDateTime', + 'EndDateTime', + 'Jar', + 'LastStateChangeReason', + 'MainClass', + 'Name', + 'StartDateTime', + 'State', + ]) + + def __init__(self, connection=None): + self.connection = connection + self.args = None + + def startElement(self, name, attrs, connection): + if name == 'Args': + self.args = 
ResultSet([('member', Arg)]) + return self.args + if name == 'Properties': + self.properties = ResultSet([('member', KeyValue)]) + return self.properties + + +class InstanceGroup(EmrObject): + Fields = set([ + 'BidPrice', + 'CreationDateTime', + 'EndDateTime', + 'InstanceGroupId', + 'InstanceRequestCount', + 'InstanceRole', + 'InstanceRunningCount', + 'InstanceType', + 'LastStateChangeReason', + 'LaunchGroup', + 'Market', + 'Name', + 'ReadyDateTime', + 'StartDateTime', + 'State', + ]) + + +class JobFlow(EmrObject): + Fields = set([ + 'AmiVersion', + 'AvailabilityZone', + 'CreationDateTime', + 'Ec2KeyName', + 'EndDateTime', + 'HadoopVersion', + 'Id', + 'InstanceCount', + 'JobFlowId', + 'KeepJobFlowAliveWhenNoSteps', + 'LastStateChangeReason', + 'LogUri', + 'MasterInstanceId', + 'MasterInstanceType', + 'MasterPublicDnsName', + 'Name', + 'NormalizedInstanceHours', + 'ReadyDateTime', + 'RequestId', + 'SlaveInstanceType', + 'StartDateTime', + 'State', + 'TerminationProtected', + 'Type', + 'Value', + 'VisibleToAllUsers', + ]) + + def __init__(self, connection=None): + self.connection = connection + self.steps = None + self.instancegroups = None + self.bootstrapactions = None + + def startElement(self, name, attrs, connection): + if name == 'Steps': + self.steps = ResultSet([('member', Step)]) + return self.steps + elif name == 'InstanceGroups': + self.instancegroups = ResultSet([('member', InstanceGroup)]) + return self.instancegroups + elif name == 'BootstrapActions': + self.bootstrapactions = ResultSet([('member', BootstrapAction)]) + return self.bootstrapactions + elif name == 'SupportedProducts': + self.supported_products = ResultSet([('member', SupportedProduct)]) + return self.supported_products + else: + return None + + +class ClusterTimeline(EmrObject): + Fields = set([ + 'CreationDateTime', + 'ReadyDateTime', + 'EndDateTime' + ]) + +class ClusterStateChangeReason(EmrObject): + Fields = set([ + 'Code', + 'Message' + ]) + +class ClusterStatus(EmrObject): + Fields = set([ + 'State', + 'StateChangeReason', + 'Timeline' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.timeline = None + + def startElement(self, name, attrs, connection): + if name == 'Timeline': + self.timeline = ClusterTimeline() + return self.timeline + elif name == 'StateChangeReason': + self.statechangereason = ClusterStateChangeReason() + return self.statechangereason + else: + return None + + +class Ec2InstanceAttributes(EmrObject): + Fields = set([ + 'Ec2KeyName', + 'Ec2SubnetId', + 'Ec2AvailabilityZone', + 'IamInstanceProfile' + ]) + + +class Application(EmrObject): + Fields = set([ + 'Name', + 'Version', + 'Args', + 'AdditionalInfo' + ]) + + +class Cluster(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'LogUri', + 'RequestedAmiVersion', + 'RunningAmiVersion', + 'AutoTerminate', + 'TerminationProtected', + 'VisibleToAllUsers', + 'MasterPublicDnsName', + 'NormalizedInstanceHours', + 'ServiceRole' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + self.ec2instanceattributes = None + self.applications = None + self.tags = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + elif name == 'Ec2InstanceAttributes': + self.ec2instanceattributes = Ec2InstanceAttributes() + return self.ec2instanceattributes + elif name == 'Applications': + self.applications = ResultSet([('member', Application)]) + return self.applications + elif name == 'Tags': + self.tags = 
ResultSet([('member', KeyValue)]) + return self.tags + else: + return None + + +class ClusterSummary(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'NormalizedInstanceHours' + ]) + + def __init__(self, connection): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class ClusterSummaryList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection): + self.connection = connection + self.clusters = None + + def startElement(self, name, attrs, connection): + if name == 'Clusters': + self.clusters = ResultSet([('member', ClusterSummary)]) + return self.clusters + else: + return None + + +class StepConfig(EmrObject): + Fields = set([ + 'Jar', + 'MainClass' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.properties = None + self.args = None + + def startElement(self, name, attrs, connection): + if name == 'Properties': + self.properties = ResultSet([('member', KeyValue)]) + return self.properties + elif name == 'Args': + self.args = ResultSet([('member', Arg)]) + return self.args + else: + return None + + +class HadoopStep(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'ActionOnFailure' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.config = None + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Config': + self.config = StepConfig() + return self.config + elif name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + + +class InstanceGroupInfo(EmrObject): + Fields = set([ + 'Id', + 'Name', + 'Market', + 'InstanceGroupType', + 'BidPrice', + 'InstanceType', + 'RequestedInstanceCount', + 'RunningInstanceCount' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class InstanceGroupList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.instancegroups = None + + def startElement(self, name, attrs, connection): + if name == 'InstanceGroups': + self.instancegroups = ResultSet([('member', InstanceGroupInfo)]) + return self.instancegroups + else: + return None + + +class InstanceInfo(EmrObject): + Fields = set([ + 'Id', + 'Ec2InstanceId', + 'PublicDnsName', + 'PublicIpAddress', + 'PrivateDnsName', + 'PrivateIpAddress' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return self.status + else: + return None + + +class InstanceList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.instances = None + + def startElement(self, name, attrs, connection): + if name == 'Instances': + self.instances = ResultSet([('member', InstanceInfo)]) + return self.instances + else: + return None + + +class StepSummary(EmrObject): + Fields = set([ + 'Id', + 'Name' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.status = None + self.config = None + + def startElement(self, name, attrs, connection): + if name == 'Status': + self.status = ClusterStatus() + return 
self.status + elif name == 'Config': + self.config = StepConfig() + return self.config + else: + return None + + +class StepSummaryList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.steps = None + + def startElement(self, name, attrs, connection): + if name == 'Steps': + self.steps = ResultSet([('member', StepSummary)]) + return self.steps + else: + return None + + +class BootstrapActionList(EmrObject): + Fields = set([ + 'Marker' + ]) + + def __init__(self, connection=None): + self.connection = connection + self.actions = None + + def startElement(self, name, attrs, connection): + if name == 'BootstrapActions': + self.actions = ResultSet([('member', BootstrapAction)]) + return self.actions + else: + return None diff --git a/ext/boto/emr/instance_group.py b/ext/boto/emr/instance_group.py new file mode 100644 index 0000000000..6ab63c5d5a --- /dev/null +++ b/ext/boto/emr/instance_group.py @@ -0,0 +1,43 @@ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
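+
+# A minimal usage sketch (values below are illustrative): mixing on-demand
+# and spot capacity, as consumed by EmrConnection.run_jobflow's
+# instance_groups argument:
+#
+#     from boto.emr.instance_group import InstanceGroup
+#
+#     groups = [
+#         InstanceGroup(1, 'MASTER', 'm1.small', 'ON_DEMAND', 'master'),
+#         InstanceGroup(4, 'CORE', 'm1.small', 'SPOT', 'core',
+#                       bidprice='0.08'),
+#     ]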
+
+
+class InstanceGroup(object):
+    def __init__(self, num_instances, role, type, market, name, bidprice=None):
+        self.num_instances = num_instances
+        self.role = role
+        self.type = type
+        self.market = market
+        self.name = name
+        if market == 'SPOT':
+            if not bidprice:
+                raise ValueError('bidprice must be specified if market == SPOT')
+            self.bidprice = str(bidprice)
+
+    def __repr__(self):
+        if self.market == 'SPOT':
+            return '%s.%s(name=%r, num_instances=%r, role=%r, type=%r, market = %r, bidprice = %r)' % (
+                self.__class__.__module__, self.__class__.__name__,
+                self.name, self.num_instances, self.role, self.type, self.market,
+                self.bidprice)
+        else:
+            return '%s.%s(name=%r, num_instances=%r, role=%r, type=%r, market = %r)' % (
+                self.__class__.__module__, self.__class__.__name__,
+                self.name, self.num_instances, self.role, self.type, self.market)
diff --git a/ext/boto/emr/step.py b/ext/boto/emr/step.py
new file mode 100644
index 0000000000..de6835fb4e
--- /dev/null
+++ b/ext/boto/emr/step.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2010 Spotify AB
+# Copyright (c) 2010-2011 Yelp
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.compat import six
+
+
+class Step(object):
+    """
+    Jobflow Step base class
+    """
+    def jar(self):
+        """
+        :rtype: str
+        :return: URI to the jar
+        """
+        raise NotImplementedError()
+
+    def args(self):
+        """
+        :rtype: list(str)
+        :return: List of arguments for the step
+        """
+        raise NotImplementedError()
+
+    def main_class(self):
+        """
+        :rtype: str
+        :return: The main class name
+        """
+        raise NotImplementedError()
+
+
+class JarStep(Step):
+    """
+    Custom jar step
+    """
+    def __init__(self, name, jar, main_class=None,
+                 action_on_failure='TERMINATE_JOB_FLOW', step_args=None):
+        """
+        An elastic mapreduce step that executes a jar
+
+        :type name: str
+        :param name: The name of the step
+        :type jar: str
+        :param jar: S3 URI to the Jar file
+        :type main_class: str
+        :param main_class: The class to execute in the jar
+        :type action_on_failure: str
+        :param action_on_failure: An action, defined in the EMR docs to
+            take on failure.
+ :type step_args: list(str) + :param step_args: A list of arguments to pass to the step + """ + self.name = name + self._jar = jar + self._main_class = main_class + self.action_on_failure = action_on_failure + + if isinstance(step_args, six.string_types): + step_args = [step_args] + + self.step_args = step_args + + def jar(self): + return self._jar + + def args(self): + args = [] + + if self.step_args: + args.extend(self.step_args) + + return args + + def main_class(self): + return self._main_class + + +class StreamingStep(Step): + """ + Hadoop streaming step + """ + def __init__(self, name, mapper, reducer=None, combiner=None, + action_on_failure='TERMINATE_JOB_FLOW', + cache_files=None, cache_archives=None, + step_args=None, input=None, output=None, + jar='/home/hadoop/contrib/streaming/hadoop-streaming.jar'): + """ + A hadoop streaming elastic mapreduce step + + :type name: str + :param name: The name of the step + :type mapper: str + :param mapper: The mapper URI + :type reducer: str + :param reducer: The reducer URI + :type combiner: str + :param combiner: The combiner URI. Only works for Hadoop 0.20 + and later! + :type action_on_failure: str + :param action_on_failure: An action, defined in the EMR docs to + take on failure. + :type cache_files: list(str) + :param cache_files: A list of cache files to be bundled with the job + :type cache_archives: list(str) + :param cache_archives: A list of jar archives to be bundled with + the job + :type step_args: list(str) + :param step_args: A list of arguments to pass to the step + :type input: str or a list of str + :param input: The input uri + :type output: str + :param output: The output uri + :type jar: str + :param jar: The hadoop streaming jar. This can be either a local + path on the master node, or an s3:// URI. + """ + self.name = name + self.mapper = mapper + self.reducer = reducer + self.combiner = combiner + self.action_on_failure = action_on_failure + self.cache_files = cache_files + self.cache_archives = cache_archives + self.input = input + self.output = output + self._jar = jar + + if isinstance(step_args, six.string_types): + step_args = [step_args] + + self.step_args = step_args + + def jar(self): + return self._jar + + def main_class(self): + return None + + def args(self): + args = [] + + # put extra args BEFORE -mapper and -reducer so that e.g. 
-libjar + # will work + if self.step_args: + args.extend(self.step_args) + + args.extend(['-mapper', self.mapper]) + + if self.combiner: + args.extend(['-combiner', self.combiner]) + + if self.reducer: + args.extend(['-reducer', self.reducer]) + else: + args.extend(['-jobconf', 'mapred.reduce.tasks=0']) + + if self.input: + if isinstance(self.input, list): + for input in self.input: + args.extend(('-input', input)) + else: + args.extend(('-input', self.input)) + if self.output: + args.extend(('-output', self.output)) + + if self.cache_files: + for cache_file in self.cache_files: + args.extend(('-cacheFile', cache_file)) + + if self.cache_archives: + for cache_archive in self.cache_archives: + args.extend(('-cacheArchive', cache_archive)) + + return args + + def __repr__(self): + return '%s.%s(name=%r, mapper=%r, reducer=%r, action_on_failure=%r, cache_files=%r, cache_archives=%r, step_args=%r, input=%r, output=%r, jar=%r)' % ( + self.__class__.__module__, self.__class__.__name__, + self.name, self.mapper, self.reducer, self.action_on_failure, + self.cache_files, self.cache_archives, self.step_args, + self.input, self.output, self._jar) + + +class ScriptRunnerStep(JarStep): + + ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar' + + def __init__(self, name, **kw): + super(ScriptRunnerStep, self).__init__(name, self.ScriptRunnerJar, **kw) + + +class PigBase(ScriptRunnerStep): + + BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/pig/pig-script', + '--base-path', 's3n://us-east-1.elasticmapreduce/libs/pig/'] + + +class InstallPigStep(PigBase): + """ + Install pig on emr step + """ + + InstallPigName = 'Install Pig' + + def __init__(self, pig_versions='latest'): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--install-pig']) + step_args.extend(['--pig-versions', pig_versions]) + super(InstallPigStep, self).__init__(self.InstallPigName, step_args=step_args) + + +class PigStep(PigBase): + """ + Pig script step + """ + + def __init__(self, name, pig_file, pig_versions='latest', pig_args=[]): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--pig-versions', pig_versions]) + step_args.extend(['--run-pig-script', '--args', '-f', pig_file]) + step_args.extend(pig_args) + super(PigStep, self).__init__(name, step_args=step_args) + + +class HiveBase(ScriptRunnerStep): + + BaseArgs = ['s3n://us-east-1.elasticmapreduce/libs/hive/hive-script', + '--base-path', 's3n://us-east-1.elasticmapreduce/libs/hive/'] + + +class InstallHiveStep(HiveBase): + """ + Install Hive on EMR step + """ + InstallHiveName = 'Install Hive' + + def __init__(self, hive_versions='latest', hive_site=None): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--install-hive']) + step_args.extend(['--hive-versions', hive_versions]) + if hive_site is not None: + step_args.extend(['--hive-site=%s' % hive_site]) + super(InstallHiveStep, self).__init__(self.InstallHiveName, + step_args=step_args) + + +class HiveStep(HiveBase): + """ + Hive script step + """ + + def __init__(self, name, hive_file, hive_versions='latest', + hive_args=None): + step_args = [] + step_args.extend(self.BaseArgs) + step_args.extend(['--hive-versions', hive_versions]) + step_args.extend(['--run-hive-script', '--args', '-f', hive_file]) + if hive_args is not None: + step_args.extend(hive_args) + super(HiveStep, self).__init__(name, step_args=step_args) diff --git a/ext/boto/endpoints.json b/ext/boto/endpoints.json new file mode 100644 index 
0000000000..d9fdef69e7 --- /dev/null +++ b/ext/boto/endpoints.json @@ -0,0 +1,1296 @@ +{ + "partitions": [ + { + "defaults": { + "hostname": "{service}.{region}.{dnsSuffix}", + "protocols": [ + "https" + ], + "signatureVersions": [ + "v4" + ] + }, + "dnsSuffix": "amazonaws.com", + "partition": "aws", + "partitionName": "AWS Standard", + "regionRegex": "^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$", + "regions": { + "ap-northeast-1": { + "description": "Asia Pacific (Tokyo)" + }, + "ap-northeast-2": { + "description": "Asia Pacific (Seoul)" + }, + "ap-south-1": { + "description": "Asia Pacific (Mumbai)" + }, + "ap-southeast-1": { + "description": "Asia Pacific (Singapore)" + }, + "ap-southeast-2": { + "description": "Asia Pacific (Sydney)" + }, + "ca-central-1": { + "description": "Canada (Central)" + }, + "eu-central-1": { + "description": "EU (Frankfurt)" + }, + "eu-west-1": { + "description": "EU (Ireland)" + }, + "eu-west-2": { + "description": "EU (London)" + }, + "sa-east-1": { + "description": "South America (Sao Paulo)" + }, + "us-east-1": { + "description": "US East (N. Virginia)" + }, + "us-east-2": { + "description": "US East (Ohio)" + }, + "us-west-1": { + "description": "US West (N. California)" + }, + "us-west-2": { + "description": "US West (Oregon)" + } + }, + "services": { + "autoscaling": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "cloudformation": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "cloudfront": { + "endpoints": { + "aws-global": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "cloudfront.amazonaws.com", + "protocols": [ + "http", + "https" + ] + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-global" + }, + "cloudhsm": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "cloudsearch": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "cloudtrail": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "codedeploy": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "cognito-identity": { + "endpoints": { + "ap-northeast-1": {}, + 
"ap-northeast-2": {}, + "ap-southeast-2": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-2": {} + } + }, + "cognito-sync": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-southeast-2": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-2": {} + } + }, + "config": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "datapipeline": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-2": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-west-2": {} + } + }, + "directconnect": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "dynamodb": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "local": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "localhost:8000", + "protocols": [ + "http" + ] + }, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "ec2": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "ecs": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "elasticache": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "elasticbeanstalk": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "elasticloadbalancing": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "elasticmapreduce": { + "defaults": { + "protocols": [ + "http", + "https" + ], + 
"sslCommonName": "{region}.{service}.{dnsSuffix}" + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": { + "sslCommonName": "{service}.{region}.{dnsSuffix}" + }, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": { + "sslCommonName": "{service}.{region}.{dnsSuffix}" + }, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "elastictranscoder": { + "endpoints": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "email": { + "endpoints": { + "eu-west-1": {}, + "us-east-1": {}, + "us-west-2": {} + } + }, + "glacier": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "iam": { + "endpoints": { + "aws-global": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "iam.amazonaws.com" + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-global" + }, + "importexport": { + "endpoints": { + "aws-global": { + "credentialScope": { + "region": "us-east-1", + "service": "IngestionService" + }, + "hostname": "importexport.amazonaws.com", + "signatureVersions": [ + "v2", + "v4" + ] + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-global" + }, + "kinesis": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "kms": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "lambda": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "logs": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "machinelearning": { + "endpoints": { + "eu-west-1": {}, + "us-east-1": {} + } + }, + "monitoring": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "opsworks": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + 
"eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "rds": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": { + "sslCommonName": "{service}.{dnsSuffix}" + }, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "redshift": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "route53": { + "endpoints": { + "aws-global": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "route53.amazonaws.com" + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-global" + }, + "route53domains": { + "endpoints": { + "us-east-1": {} + } + }, + "s3": { + "defaults": { + "protocols": [ + "http", + "https" + ], + "signatureVersions": [ + "s3v4" + ] + }, + "endpoints": { + "ap-northeast-1": { + "hostname": "s3-ap-northeast-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": { + "hostname": "s3-ap-southeast-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "ap-southeast-2": { + "hostname": "s3-ap-southeast-2.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": { + "hostname": "s3-eu-west-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "eu-west-2": {}, + "s3-external-1": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "s3-external-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "sa-east-1": { + "hostname": "s3-sa-east-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "us-east-1": { + "hostname": "s3.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "us-east-2": {}, + "us-west-1": { + "hostname": "s3-us-west-1.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "us-west-2": { + "hostname": "s3-us-west-2.amazonaws.com", + "signatureVersions": [ + "s3", + "s3v4" + ] + } + }, + "isRegionalized": true, + "partitionEndpoint": "us-east-1" + }, + "sdb": { + "defaults": { + "protocols": [ + "http", + "https" + ], + "signatureVersions": [ + "v2" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "eu-west-1": {}, + "sa-east-1": {}, + "us-east-1": { + "hostname": "sdb.amazonaws.com" + }, + "us-west-1": {}, + "us-west-2": {} + } + }, + "sns": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "sqs": { + "defaults": { + "protocols": [ + "http", + "https" + ], + "sslCommonName": "{region}.queue.{dnsSuffix}" + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + 
"ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": { + "sslCommonName": "queue.{dnsSuffix}" + }, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "storagegateway": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, + "sts": { + "defaults": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "sts.amazonaws.com" + }, + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": { + "credentialScope": { + "region": "ap-northeast-2" + }, + "hostname": "sts.ap-northeast-2.amazonaws.com" + }, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "aws-global": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + }, + "partitionEndpoint": "aws-global" + }, + "support": { + "endpoints": { + "us-east-1": {} + } + }, + "swf": { + "endpoints": { + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + } + } + }, + { + "defaults": { + "hostname": "{service}.{region}.{dnsSuffix}", + "protocols": [ + "https" + ], + "signatureVersions": [ + "v4" + ] + }, + "dnsSuffix": "amazonaws.com.cn", + "partition": "aws-cn", + "partitionName": "AWS China", + "regionRegex": "^cn\\-\\w+\\-\\d+$", + "regions": { + "cn-north-1": { + "description": "China (Beijing)" + } + }, + "services": { + "autoscaling": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "cloudformation": { + "endpoints": { + "cn-north-1": {} + } + }, + "cloudtrail": { + "endpoints": { + "cn-north-1": {} + } + }, + "config": { + "endpoints": { + "cn-north-1": {} + } + }, + "directconnect": { + "endpoints": { + "cn-north-1": {} + } + }, + "dynamodb": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "ec2": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "elasticache": { + "endpoints": { + "cn-north-1": {} + } + }, + "elasticbeanstalk": { + "endpoints": { + "cn-north-1": {} + } + }, + "elasticloadbalancing": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "elasticmapreduce": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "glacier": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "iam": { + "endpoints": { + "aws-cn-global": { + "credentialScope": { + "region": "cn-north-1" + }, + "hostname": "iam.cn-north-1.amazonaws.com.cn" + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-cn-global" + }, + "kinesis": { + "endpoints": { + "cn-north-1": {} + } + }, + "logs": { + "endpoints": { + "cn-north-1": {} + } + }, + "monitoring": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "rds": { + "endpoints": { + 
"cn-north-1": {} + } + }, + "redshift": { + "endpoints": { + "cn-north-1": {} + } + }, + "s3": { + "defaults": { + "protocols": [ + "http", + "https" + ], + "signatureVersions": [ + "s3v4" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "sns": { + "defaults": { + "protocols": [ + "http", + "https" + ] + }, + "endpoints": { + "cn-north-1": {} + } + }, + "sqs": { + "defaults": { + "protocols": [ + "http", + "https" + ], + "sslCommonName": "{region}.queue.{dnsSuffix}" + }, + "endpoints": { + "cn-north-1": {} + } + }, + "storagegateway": { + "endpoints": { + "cn-north-1": {} + } + }, + "sts": { + "endpoints": { + "cn-north-1": {} + } + }, + "swf": { + "endpoints": { + "cn-north-1": {} + } + } + } + }, + { + "defaults": { + "hostname": "{service}.{region}.{dnsSuffix}", + "protocols": [ + "https" + ], + "signatureVersions": [ + "v4" + ] + }, + "dnsSuffix": "amazonaws.com", + "partition": "aws-us-gov", + "partitionName": "AWS GovCloud (US)", + "regionRegex": "^us\\-gov\\-\\w+\\-\\d+$", + "regions": { + "us-gov-west-1": { + "description": "AWS GovCloud (US)" + } + }, + "services": { + "autoscaling": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ] + } + } + }, + "cloudformation": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "cloudhsm": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "cloudtrail": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "config": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "directconnect": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "dynamodb": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "ec2": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "elasticache": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "elasticloadbalancing": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ] + } + } + }, + "elasticmapreduce": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ] + } + } + }, + "glacier": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ] + } + } + }, + "iam": { + "endpoints": { + "aws-us-gov-global": { + "credentialScope": { + "region": "us-gov-west-1" + }, + "hostname": "iam.us-gov.amazonaws.com" + } + }, + "isRegionalized": false, + "partitionEndpoint": "aws-us-gov-global" + }, + "kms": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "logs": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "monitoring": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "rds": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "redshift": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "s3": { + "defaults": { + "signatureVersions": [ + "s3", + "s3v4" + ] + }, + "endpoints": { + "fips-us-gov-west-1": { + "credentialScope": { + "region": "us-gov-west-1" + }, + "hostname": "s3-fips-us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1": { + "hostname": "s3-us-gov-west-1.amazonaws.com", + "protocols": [ + "http", + "https" + ] + } + } + }, + "sns": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ] + } + } + }, + "sqs": { + "endpoints": { + "us-gov-west-1": { + "protocols": [ + "http", + "https" + ], + "sslCommonName": "{region}.queue.{dnsSuffix}" + } + } + }, + "sts": { + "endpoints": { + "us-gov-west-1": {} + } + }, + "swf": { + "endpoints": { + "us-gov-west-1": {} + } + } + } + } + ], + "version": 3 +} diff --git a/ext/boto/endpoints.py b/ext/boto/endpoints.py new file mode 100644 index 0000000000..d29b2d34bf --- /dev/null +++ b/ext/boto/endpoints.py @@ -0,0 +1,239 @@ +# 
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +import boto.vendored.regions.regions as _regions + + +class _CompatEndpointResolver(_regions.EndpointResolver): + """Endpoint resolver which handles boto2 compatibility concerns. + + This is NOT intended for external use whatsoever. + """ + + _DEFAULT_SERVICE_RENAMES = { + # The botocore resolver is based on endpoint prefix. + # These don't always sync up to the name that boto2 uses. + # A mapping can be provided that handles the mapping between + # "service names" and endpoint prefixes. + 'awslambda': 'lambda', + 'cloudwatch': 'monitoring', + 'ses': 'email', + 'ec2containerservice': 'ecs', + 'configservice': 'config', + } + + def __init__(self, endpoint_data, service_rename_map=None): + """ + :type endpoint_data: dict + :param endpoint_data: Regions and endpoints data in the same format + as is used by botocore / boto3. + + :type service_rename_map: dict + :param service_rename_map: A mapping of boto2 service name to + endpoint prefix. + """ + super(_CompatEndpointResolver, self).__init__(endpoint_data) + if service_rename_map is None: + service_rename_map = self._DEFAULT_SERVICE_RENAMES + # Mapping of boto2 service name to endpoint prefix + self._endpoint_prefix_map = service_rename_map + # Mapping of endpoint prefix to boto2 service name + self._service_name_map = dict( + (v, k) for k, v in service_rename_map.items()) + + def get_available_endpoints(self, service_name, partition_name='aws', + allow_non_regional=False): + endpoint_prefix = self._endpoint_prefix(service_name) + return super(_CompatEndpointResolver, self).get_available_endpoints( + endpoint_prefix, partition_name, allow_non_regional) + + def get_all_available_regions(self, service_name): + """Retrieve every region across partitions for a service.""" + regions = set() + endpoint_prefix = self._endpoint_prefix(service_name) + + # Get every region for every partition in the new endpoint format + for partition_name in self.get_available_partitions(): + if self._is_global_service(service_name, partition_name): + # Global services are available in every region in the + # partition in which they are considered global. 
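+                # (For example, 'iam' is served through a single
+                # 'aws-global' partitionEndpoint, so every region of the
+                # 'aws' partition is reported as available for it.)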
+ partition = self._get_partition_data(partition_name) + regions.update(partition['regions'].keys()) + continue + else: + regions.update( + self.get_available_endpoints( + endpoint_prefix, partition_name) + ) + + return list(regions) + + def construct_endpoint(self, service_name, region_name=None): + endpoint_prefix = self._endpoint_prefix(service_name) + return super(_CompatEndpointResolver, self).construct_endpoint( + endpoint_prefix, region_name) + + def get_available_services(self): + """Get a list of all the available services in the endpoints file(s)""" + services = set() + + for partition in self._endpoint_data['partitions']: + services.update(partition['services'].keys()) + + return [self._service_name(s) for s in services] + + def _is_global_service(self, service_name, partition_name='aws'): + """Determines whether a service uses a global endpoint. + + In theory a service can be 'global' in one partition but regional in + another. In practice, each service is all global or all regional. + """ + endpoint_prefix = self._endpoint_prefix(service_name) + partition = self._get_partition_data(partition_name) + service = partition['services'].get(endpoint_prefix, {}) + return 'partitionEndpoint' in service + + def _get_partition_data(self, partition_name): + """Get partition information for a particular partition. + + This should NOT be used to get service endpoint data because it only + loads from the new endpoint format. It should only be used for + partition metadata and partition specific service metadata. + + :type partition_name: str + :param partition_name: The name of the partition to search for. + + :returns: Partition info from the new endpoints format. + :rtype: dict or None + """ + for partition in self._endpoint_data['partitions']: + if partition['partition'] == partition_name: + return partition + raise ValueError( + "Could not find partition data for: %s" % partition_name) + + def _endpoint_prefix(self, service_name): + """Given a boto2 service name, get the endpoint prefix.""" + return self._endpoint_prefix_map.get(service_name, service_name) + + def _service_name(self, endpoint_prefix): + """Given an endpoint prefix, get the boto2 service name.""" + return self._service_name_map.get(endpoint_prefix, endpoint_prefix) + + +class BotoEndpointResolver(object): + """Resolves endpoint hostnames for AWS services. + + This is NOT intended for external use. + """ + + def __init__(self, endpoint_data, service_rename_map=None): + """ + :type endpoint_data: dict + :param endpoint_data: Regions and endpoints data in the same format + as is used by botocore / boto3. + + :type service_rename_map: dict + :param service_rename_map: A mapping of boto2 service name to + endpoint prefix. + """ + self._resolver = _CompatEndpointResolver( + endpoint_data, service_rename_map) + + def resolve_hostname(self, service_name, region_name): + """Resolve the hostname for a service in a particular region. + + :type service_name: str + :param service_name: The service to look up. + + :type region_name: str + :param region_name: The region to find the endpoint for. + + :return: The hostname for the given service in the given region. + """ + endpoint = self._resolver.construct_endpoint(service_name, region_name) + if endpoint is None: + return None + return endpoint.get('sslCommonName', endpoint['hostname']) + + def get_all_available_regions(self, service_name): + """Get all the regions a service is available in. + + :type service_name: str + :param service_name: The service to look up. 
+ + :rtype: list of str + :return: A list of all the regions the given service is available in. + """ + return self._resolver.get_all_available_regions(service_name) + + def get_available_services(self): + """Get all the services supported by the endpoint data. + + :rtype: list of str + :return: A list of all the services explicitly contained within the + endpoint data provided during instantiation. + """ + return self._resolver.get_available_services() + + +class StaticEndpointBuilder(object): + """Builds a static mapping of endpoints in the legacy format.""" + + def __init__(self, resolver): + """ + :type resolver: BotoEndpointResolver + :param resolver: An endpoint resolver. + """ + self._resolver = resolver + + def build_static_endpoints(self, service_names=None): + """Build a set of static endpoints in the legacy boto2 format. + + :param service_names: The names of the services to build. They must + use the names that boto2 uses, not boto3, e.g "ec2containerservice" + and not "ecs". If no service names are provided, all available + services will be built. + + :return: A dict consisting of:: + {"service": {"region": "full.host.name"}} + """ + if service_names is None: + service_names = self._resolver.get_available_services() + + static_endpoints = {} + for name in service_names: + endpoints_for_service = self._build_endpoints_for_service(name) + if endpoints_for_service: + # It's possible that when we try to build endpoints for + # services we get an empty hash. In that case we don't + # bother adding it to the final list of static endpoints. + static_endpoints[name] = endpoints_for_service + self._handle_special_cases(static_endpoints) + return static_endpoints + + def _build_endpoints_for_service(self, service_name): + # Given a service name, 'ec2', build a dict of + # 'region' -> 'hostname' + endpoints = {} + regions = self._resolver.get_all_available_regions(service_name) + for region_name in regions: + endpoints[region_name] = self._resolver.resolve_hostname( + service_name, region_name) + return endpoints + + def _handle_special_cases(self, static_endpoints): + # cloudsearchdomain endpoints use the exact same set of endpoints as + # cloudsearch. + if 'cloudsearch' in static_endpoints: + cloudsearch_endpoints = static_endpoints['cloudsearch'] + static_endpoints['cloudsearchdomain'] = cloudsearch_endpoints diff --git a/ext/boto/exception.py b/ext/boto/exception.py new file mode 100644 index 0000000000..2f175979b2 --- /dev/null +++ b/ext/boto/exception.py @@ -0,0 +1,585 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Exception classes - Subclassing allows you to check for specific errors +""" +import base64 +import xml.sax + +import boto + +from boto import handler +from boto.compat import json, StandardError +from boto.resultset import ResultSet + + +class BotoClientError(StandardError): + """ + General Boto Client error (error accessing AWS) + """ + def __init__(self, reason, *args): + super(BotoClientError, self).__init__(reason, *args) + self.reason = reason + + def __repr__(self): + return 'BotoClientError: %s' % self.reason + + def __str__(self): + return 'BotoClientError: %s' % self.reason + + +class SDBPersistenceError(StandardError): + pass + + +class StoragePermissionsError(BotoClientError): + """ + Permissions error when accessing a bucket or key on a storage service. + """ + pass + + +class S3PermissionsError(StoragePermissionsError): + """ + Permissions error when accessing a bucket or key on S3. + """ + pass + + +class GSPermissionsError(StoragePermissionsError): + """ + Permissions error when accessing a bucket or key on GS. + """ + pass + + +class BotoServerError(StandardError): + def __init__(self, status, reason, body=None, *args): + super(BotoServerError, self).__init__(status, reason, body, *args) + self.status = status + self.reason = reason + self.body = body or '' + self.request_id = None + self.error_code = None + self._error_message = None + self.message = '' + self.box_usage = None + + if isinstance(self.body, bytes): + try: + self.body = self.body.decode('utf-8') + except UnicodeDecodeError: + boto.log.debug('Unable to decode body from bytes!') + + # Attempt to parse the error response. If body isn't present, + # then just ignore the error response. + if self.body: + # Check if it looks like a ``dict``. + if hasattr(self.body, 'items'): + # It's not a string, so trying to parse it will fail. + # But since it's data, we can work with that. + self.request_id = self.body.get('RequestId', None) + + if 'Error' in self.body: + # XML-style + error = self.body.get('Error', {}) + self.error_code = error.get('Code', None) + self.message = error.get('Message', None) + else: + # JSON-style. + self.message = self.body.get('message', None) + else: + try: + h = handler.XmlHandlerWrapper(self, self) + h.parseString(self.body) + except (TypeError, xml.sax.SAXParseException): + # What if it's JSON? Let's try that. + try: + parsed = json.loads(self.body) + + if 'RequestId' in parsed: + self.request_id = parsed['RequestId'] + if 'Error' in parsed: + if 'Code' in parsed['Error']: + self.error_code = parsed['Error']['Code'] + if 'Message' in parsed['Error']: + self.message = parsed['Error']['Message'] + + except (TypeError, ValueError): + # Remove unparsable message body so we don't include garbage + # in exception. But first, save self.body in self.error_message + # because occasionally we get error messages from Eucalyptus + # that are just text strings that we want to preserve. 
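+                        # (error_message is an alias of message via
+                        # __getattr__ below, so the preserved text stays
+                        # reachable under both names.)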
+ self.message = self.body + self.body = None + + def __getattr__(self, name): + if name == 'error_message': + return self.message + if name == 'code': + return self.error_code + raise AttributeError + + def __setattr__(self, name, value): + if name == 'error_message': + self.message = value + else: + super(BotoServerError, self).__setattr__(name, value) + + def __repr__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def __str__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ('RequestId', 'RequestID'): + self.request_id = value + elif name == 'Code': + self.error_code = value + elif name == 'Message': + self.message = value + elif name == 'BoxUsage': + self.box_usage = value + return None + + def _cleanupParsedProperties(self): + self.request_id = None + self.error_code = None + self.message = None + self.box_usage = None + + +class ConsoleOutput(object): + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.comment = None + self.output = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + + +class StorageCreateError(BotoServerError): + """ + Error creating a bucket or key on a storage service. + """ + def __init__(self, status, reason, body=None): + self.bucket = None + super(StorageCreateError, self).__init__(status, reason, body) + + def endElement(self, name, value, connection): + if name == 'BucketName': + self.bucket = value + else: + return super(StorageCreateError, self).endElement(name, value, connection) + + +class S3CreateError(StorageCreateError): + """ + Error creating a bucket or key on S3. + """ + pass + + +class GSCreateError(StorageCreateError): + """ + Error creating a bucket or key on GS. + """ + pass + + +class StorageCopyError(BotoServerError): + """ + Error copying a key on a storage service. + """ + pass + + +class S3CopyError(StorageCopyError): + """ + Error copying a key on S3. + """ + pass + + +class GSCopyError(StorageCopyError): + """ + Error copying a key on GS. + """ + pass + + +class SQSError(BotoServerError): + """ + General Error on Simple Queue Service. + """ + def __init__(self, status, reason, body=None): + self.detail = None + self.type = None + super(SQSError, self).__init__(status, reason, body) + + def startElement(self, name, attrs, connection): + return super(SQSError, self).startElement(name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Detail': + self.detail = value + elif name == 'Type': + self.type = value + else: + return super(SQSError, self).endElement(name, value, connection) + + def _cleanupParsedProperties(self): + super(SQSError, self)._cleanupParsedProperties() + for p in ('detail', 'type'): + setattr(self, p, None) + + +class SQSDecodeError(BotoClientError): + """ + Error when decoding an SQS message. 
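+
+    The message object that failed to decode is kept on self.message.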
+ """ + def __init__(self, reason, message): + super(SQSDecodeError, self).__init__(reason, message) + self.message = message + + def __repr__(self): + return 'SQSDecodeError: %s' % self.reason + + def __str__(self): + return 'SQSDecodeError: %s' % self.reason + + +class StorageResponseError(BotoServerError): + """ + Error in response from a storage service. + """ + def __init__(self, status, reason, body=None): + self.resource = None + super(StorageResponseError, self).__init__(status, reason, body) + + def startElement(self, name, attrs, connection): + return super(StorageResponseError, self).startElement( + name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Resource': + self.resource = value + else: + return super(StorageResponseError, self).endElement( + name, value, connection) + + def _cleanupParsedProperties(self): + super(StorageResponseError, self)._cleanupParsedProperties() + for p in ('resource'): + setattr(self, p, None) + + +class S3ResponseError(StorageResponseError): + """ + Error in response from S3. + """ + pass + + +class GSResponseError(StorageResponseError): + """ + Error in response from GS. + """ + pass + + +class EC2ResponseError(BotoServerError): + """ + Error in response from EC2. + """ + def __init__(self, status, reason, body=None): + self.errors = None + self._errorResultSet = [] + super(EC2ResponseError, self).__init__(status, reason, body) + self.errors = [ + (e.error_code, e.error_message) for e in self._errorResultSet] + if len(self.errors): + self.error_code, self.error_message = self.errors[0] + + def startElement(self, name, attrs, connection): + if name == 'Errors': + self._errorResultSet = ResultSet([('Error', _EC2Error)]) + return self._errorResultSet + else: + return None + + def endElement(self, name, value, connection): + if name == 'RequestID': + self.request_id = value + else: + return None # don't call subclass here + + def _cleanupParsedProperties(self): + super(EC2ResponseError, self)._cleanupParsedProperties() + self._errorResultSet = [] + for p in ('errors'): + setattr(self, p, None) + + +class JSONResponseError(BotoServerError): + """ + This exception expects the fully parsed and decoded JSON response + body to be passed as the body parameter. + + :ivar status: The HTTP status code. + :ivar reason: The HTTP reason message. + :ivar body: The Python dict that represents the decoded JSON + response body. + :ivar error_message: The full description of the AWS error encountered. + :ivar error_code: A short string that identifies the AWS error + (e.g. 
ConditionalCheckFailedException) + """ + def __init__(self, status, reason, body=None, *args): + self.status = status + self.reason = reason + self.body = body + if self.body: + self.error_message = self.body.get('message', None) + self.error_code = self.body.get('__type', None) + if self.error_code: + self.error_code = self.error_code.split('#')[-1] + + +class DynamoDBResponseError(JSONResponseError): + pass + + +class SWFResponseError(JSONResponseError): + pass + + +class EmrResponseError(BotoServerError): + """ + Error in response from EMR + """ + pass + + +class _EC2Error(object): + def __init__(self, connection=None): + self.connection = connection + self.error_code = None + self.error_message = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Code': + self.error_code = value + elif name == 'Message': + self.error_message = value + else: + return None + + +class SDBResponseError(BotoServerError): + """ + Error in responses from SDB. + """ + pass + + +class AWSConnectionError(BotoClientError): + """ + General error connecting to Amazon Web Services. + """ + pass + + +class StorageDataError(BotoClientError): + """ + Error receiving data from a storage service. + """ + pass + + +class S3DataError(StorageDataError): + """ + Error receiving data from S3. + """ + pass + + +class GSDataError(StorageDataError): + """ + Error receiving data from GS. + """ + pass + + +class InvalidUriError(Exception): + """Exception raised when URI is invalid.""" + + def __init__(self, message): + super(InvalidUriError, self).__init__(message) + self.message = message + + +class InvalidAclError(Exception): + """Exception raised when ACL XML is invalid.""" + + def __init__(self, message): + super(InvalidAclError, self).__init__(message) + self.message = message + + +class InvalidCorsError(Exception): + """Exception raised when CORS XML is invalid.""" + + def __init__(self, message): + super(InvalidCorsError, self).__init__(message) + self.message = message + + +class NoAuthHandlerFound(Exception): + """Is raised when no auth handlers were found ready to authenticate.""" + pass + + +class InvalidLifecycleConfigError(Exception): + """Exception raised when GCS lifecycle configuration XML is invalid.""" + + def __init__(self, message): + super(InvalidLifecycleConfigError, self).__init__(message) + self.message = message + + +# Enum class for resumable upload failure disposition. +class ResumableTransferDisposition(object): + # START_OVER means an attempt to resume an existing transfer failed, + # and a new resumable upload should be attempted (without delay). + START_OVER = 'START_OVER' + + # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can + # be retried after a time delay within the current process. + WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY' + + # ABORT_CUR_PROCESS means the resumable transfer failed and that + # delaying/retrying within the current process will not help. If + # resumable transfer included a state tracker file the upload can be + # retried again later, in another process (e.g., a later run of gsutil). + ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS' + + # ABORT means the resumable transfer failed in a way that it does not + # make sense to continue in the current process, and further that the + # current tracker ID should not be preserved (in a tracker file if one + # was specified at resumable upload start time). 
If the user tries again + # later (e.g., a separate run of gsutil) it will get a new resumable + # upload ID. + ABORT = 'ABORT' + + +class ResumableUploadException(Exception): + """ + Exception raised for various resumable upload problems. + + self.disposition is of type ResumableTransferDisposition. + """ + + def __init__(self, message, disposition): + super(ResumableUploadException, self).__init__(message, disposition) + self.message = message + self.disposition = disposition + + def __repr__(self): + return 'ResumableUploadException("%s", %s)' % ( + self.message, self.disposition) + + +class ResumableDownloadException(Exception): + """ + Exception raised for various resumable download problems. + + self.disposition is of type ResumableTransferDisposition. + """ + + def __init__(self, message, disposition): + super(ResumableDownloadException, self).__init__(message, disposition) + self.message = message + self.disposition = disposition + + def __repr__(self): + return 'ResumableDownloadException("%s", %s)' % ( + self.message, self.disposition) + + +class TooManyRecordsException(Exception): + """ + Exception raised when a search of Route53 records returns more + records than requested. + """ + + def __init__(self, message): + super(TooManyRecordsException, self).__init__(message) + self.message = message + + +class PleaseRetryException(Exception): + """ + Indicates a request should be retried. + """ + def __init__(self, message, response=None): + self.message = message + self.response = response + + def __repr__(self): + return 'PleaseRetryException("%s", %s)' % ( + self.message, + self.response + ) + + +class InvalidInstanceMetadataError(Exception): + MSG = ( + "You can set the 'metadata_service_num_attempts' " + "in your boto config file to increase the number " + "of times boto will attempt to retrieve " + "credentials from the instance metadata service." + ) + def __init__(self, msg): + final_msg = msg + '\n' + self.MSG + super(InvalidInstanceMetadataError, self).__init__(final_msg) diff --git a/ext/boto/file/README b/ext/boto/file/README new file mode 100644 index 0000000000..af824554e1 --- /dev/null +++ b/ext/boto/file/README @@ -0,0 +1,49 @@ +Handling of file:// URIs: + +This directory contains code to map basic boto connection, bucket, and key +operations onto files in the local filesystem, in support of file:// +URI operations. + +Bucket storage operations cannot be mapped completely onto a file system +because of the different naming semantics in these types of systems: the +former have a flat name space of objects within each named bucket; the +latter have a hierarchical name space of files, and nothing corresponding to +the notion of a bucket. The mapping we selected was guided by the desire +to achieve meaningful semantics for a useful subset of operations that can +be implemented polymorphically across both types of systems. We considered +several possibilities for mapping path names to bucket + object name: + +1) bucket = the file system root or local directory (for absolute vs +relative file:// URIs, respectively) and object = remainder of path. +We discarded this choice because the get_all_keys() method doesn't make +sense under this approach: Enumerating all files under the root or current +directory could include more than the caller intended. For example, +StorageUri("file:///usr/bin/X11/vim").get_all_keys() would enumerate all +files in the file system. 
+ +2) bucket is treated mostly as an anonymous placeholder, with the object +name holding the URI path (minus the "file://" part). Two sub-options, +for object enumeration (the get_all_keys() call): + a) disallow get_all_keys(). This isn't great, as then the caller must + know the URI type before deciding whether to make this call. + b) return the single key for which this "bucket" was defined. + Note that this option means the app cannot use this API for listing + contents of the file system. While that makes the API less generally + useful, it avoids the potentially dangerous/unintended consequences + noted in option (1) above. + +We selected 2b, resulting in a class hierarchy where StorageUri is an abstract +class, with FileStorageUri and BucketStorageUri subclasses. + +Some additional notes: + +BucketStorageUri and FileStorageUri each implement these methods: + - clone_replace_name() creates a same-type URI with a + different object name - which is useful for various enumeration cases + (e.g., implementing wildcarding in a command line utility). + - names_container() determines if the given URI names a container for + multiple objects/files - i.e., a bucket or directory. + - names_singleton() determines if the given URI names an individual object + or file. + - is_file_uri() and is_cloud_uri() determine if the given URI is a + FileStorageUri or BucketStorageUri, respectively diff --git a/ext/boto/file/__init__.py b/ext/boto/file/__init__.py new file mode 100644 index 0000000000..837a164f47 --- /dev/null +++ b/ext/boto/file/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto + +from boto.file.connection import FileConnection as Connection +from boto.file.key import Key +from boto.file.bucket import Bucket + +__all__ = ['Connection', 'Key', 'Bucket'] diff --git a/ext/boto/file/bucket.py b/ext/boto/file/bucket.py new file mode 100644 index 0000000000..d49755eaae --- /dev/null +++ b/ext/boto/file/bucket.py @@ -0,0 +1,112 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of bucket, for use with "file://" URIs.
+
+import os
+from boto.file.key import Key
+from boto.file.simpleresultset import SimpleResultSet
+from boto.s3.bucketlistresultset import BucketListResultSet
+
+class Bucket(object):
+    def __init__(self, name, contained_key):
+        """Instantiate an anonymous file-based Bucket around a single key.
+        """
+        self.name = name
+        self.contained_key = contained_key
+
+    def __iter__(self):
+        return iter(BucketListResultSet(self))
+
+    def __str__(self):
+        return 'anonymous bucket for file://' + self.contained_key
+
+    def delete_key(self, key_name, headers=None,
+                   version_id=None, mfa_token=None):
+        """
+        Deletes a key from the bucket.
+
+        :type key_name: string
+        :param key_name: The key name to delete
+
+        :type version_id: string
+        :param version_id: Unused in this subclass.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: Unused in this subclass.
+        """
+        os.remove(key_name)
+
+    def get_all_keys(self, headers=None, **params):
+        """
+        This method returns the single key around which this anonymous Bucket
+        was instantiated.
+
+        :rtype: SimpleResultSet
+        :return: The result from file system listing the keys requested
+
+        """
+        key = Key(self.name, self.contained_key)
+        return SimpleResultSet([key])
+
+    def get_key(self, key_name, headers=None, version_id=None,
+                key_type=Key.KEY_REGULAR_FILE):
+        """
+        Check to see if a particular key exists within the bucket.
+        Returns: An instance of a Key object or None
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :type version_id: string
+        :param version_id: Unused in this subclass.
+
+        :type key_type: integer
+        :param key_type: Type of the Key - Regular File or input/output Stream
+
+        :rtype: :class:`boto.file.key.Key`
+        :returns: A Key object from this bucket.
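+
+        :note: A key_name of '-' returns a readable stream Key bound to
+            stdin instead of opening a file from disk.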
+ """ + if key_name == '-': + return Key(self.name, '-', key_type=Key.KEY_STREAM_READABLE) + else: + fp = open(key_name, 'rb') + return Key(self.name, key_name, fp) + + def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE): + """ + Creates a new key + + :type key_name: string + :param key_name: The name of the key to create + + :rtype: :class:`boto.file.key.Key` + :returns: An instance of the newly created key object + """ + if key_name == '-': + return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE) + else: + dir_name = os.path.dirname(key_name) + if dir_name and not os.path.exists(dir_name): + os.makedirs(dir_name) + fp = open(key_name, 'wb') + return Key(self.name, key_name, fp) diff --git a/ext/boto/file/connection.py b/ext/boto/file/connection.py new file mode 100644 index 0000000000..2507e2db0b --- /dev/null +++ b/ext/boto/file/connection.py @@ -0,0 +1,33 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# File representation of connection, for use with "file://" URIs. + +from boto.file.bucket import Bucket + +class FileConnection(object): + + def __init__(self, file_storage_uri): + # FileConnections are per-file storage URI. + self.file_storage_uri = file_storage_uri + + def get_bucket(self, bucket_name, validate=True, headers=None): + return Bucket(bucket_name, self.file_storage_uri.object_name) diff --git a/ext/boto/file/key.py b/ext/boto/file/key.py new file mode 100644 index 0000000000..3ec345d464 --- /dev/null +++ b/ext/boto/file/key.py @@ -0,0 +1,201 @@ +# Copyright 2010 Google Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of key, for use with "file://" URIs.
+
+import os, shutil
+import sys
+
+from boto.compat import StringIO
+from boto.exception import BotoClientError
+
+class Key(object):
+
+    KEY_STREAM_READABLE = 0x01
+    KEY_STREAM_WRITABLE = 0x02
+    KEY_STREAM = (KEY_STREAM_READABLE | KEY_STREAM_WRITABLE)
+    KEY_REGULAR_FILE = 0x00
+
+    def __init__(self, bucket, name, fp=None, key_type=KEY_REGULAR_FILE):
+        self.bucket = bucket
+        self.full_path = name
+        if name == '-':
+            self.name = None
+            self.size = None
+        else:
+            self.name = name
+            self.size = os.stat(name).st_size
+        self.key_type = key_type
+        if key_type == self.KEY_STREAM_READABLE:
+            self.fp = sys.stdin
+            self.full_path = ''
+        elif key_type == self.KEY_STREAM_WRITABLE:
+            self.fp = sys.stdout
+            self.full_path = ''
+        else:
+            self.fp = fp
+
+    def __str__(self):
+        return 'file://' + self.full_path
+
+    def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
+        """
+        Retrieves a file from a Key
+
+        :type fp: file
+        :param fp: File pointer to put the data into
+
+        :type headers: string
+        :param headers: ignored in this subclass.
+
+        :type cb: function
+        :param cb: ignored in this subclass.
+
+        :type num_cb: int
+        :param num_cb: ignored in this subclass.
+        """
+        if self.key_type & self.KEY_STREAM_WRITABLE:
+            raise BotoClientError('Stream is not readable')
+        elif self.key_type & self.KEY_STREAM_READABLE:
+            key_file = self.fp
+        else:
+            key_file = open(self.full_path, 'rb')
+        try:
+            shutil.copyfileobj(key_file, fp)
+        finally:
+            key_file.close()
+
+    def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
+                               num_cb=10, policy=None, md5=None):
+        """
+        Store an object in a file using the name of the Key object as the
+        key in file URI and the contents of the file pointed to by 'fp' as the
+        contents.
+
+        :type fp: file
+        :param fp: the file whose contents to upload
+
+        :type headers: dict
+        :param headers: ignored in this subclass.
+
+        :type replace: bool
+        :param replace: If this parameter is False, the method
+            will first check to see if an object exists in the
+            bucket with the same key. If it does, it won't
+            overwrite it. The default value is True which will
+            overwrite the object.
+
+        :type cb: function
+        :param cb: ignored in this subclass.
+
+        :type num_cb: int
+        :param num_cb: ignored in this subclass.
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: ignored in this subclass.
+
+        :type md5: A tuple containing the hexdigest version of the MD5 checksum
+            of the file as the first element and the Base64-encoded
+            version of the plain checksum as the second element.
+            This is the same format returned by the compute_md5 method.
+        :param md5: ignored in this subclass.
+        """
+        if self.key_type & self.KEY_STREAM_READABLE:
+            raise BotoClientError('Stream is not writable')
+        elif self.key_type & self.KEY_STREAM_WRITABLE:
+            key_file = self.fp
+        else:
+            if not replace and os.path.exists(self.full_path):
+                return
+            key_file = open(self.full_path, 'wb')
+        try:
+            shutil.copyfileobj(fp, key_file)
+        finally:
+            key_file.close()
+
+    def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=None,
+                             torrent=False, version_id=None,
+                             res_download_handler=None, response_headers=None):
+        """
+        Copy contents from the current file to the file pointed to by 'fp'.
+
+        :type fp: File-like object
+        :param fp:
+
+        :type headers: dict
+        :param headers: Unused in this subclass.
+
+        :type cb: function
+        :param cb: Unused in this subclass.
+
+        :type num_cb: int
+        :param num_cb: Unused in this subclass.
+
+        :type torrent: bool
+        :param torrent: Unused in this subclass.
+
+        :type res_download_handler: ResumableDownloadHandler
+        :param res_download_handler: Unused in this subclass.
+
+        :type response_headers: dict
+        :param response_headers: Unused in this subclass.
+        """
+        shutil.copyfileobj(self.fp, fp)
+
+    def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
+                               torrent=False):
+        """
+        Retrieve file data from the Key, and return contents as a string.
+
+        :type headers: dict
+        :param headers: ignored in this subclass.
+
+        :type cb: function
+        :param cb: ignored in this subclass.
+
+        :type num_cb: int
+        :param num_cb: ignored in this subclass.
+
+        :type torrent: bool
+        :param torrent: ignored in this subclass.
+
+        :rtype: string
+        :returns: The contents of the file as a string
+        """
+
+        fp = StringIO()
+        self.get_contents_to_file(fp)
+        return fp.getvalue()
+
+    def is_stream(self):
+        return (self.key_type & self.KEY_STREAM)
+
+    def close(self):
+        """
+        Closes fp associated with underlying file.
+        Caller should call this method when done with this class, to avoid
+        using up OS resources (e.g., when iterating over a large number
+        of files).
+        """
+        self.fp.close()
diff --git a/ext/boto/file/simpleresultset.py b/ext/boto/file/simpleresultset.py
new file mode 100644
index 0000000000..5f94dc1165
--- /dev/null
+++ b/ext/boto/file/simpleresultset.py
@@ -0,0 +1,30 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class SimpleResultSet(list):
+    """
+    ResultSet facade built from a simple list, rather than via XML parsing.
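+    Since the full list is supplied up front, is_truncated is always False.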
+ """ + + def __init__(self, input_list): + for x in input_list: + self.append(x) + self.is_truncated = False diff --git a/ext/boto/fps/__init__.py b/ext/boto/fps/__init__.py new file mode 100644 index 0000000000..d69b7f08a4 --- /dev/null +++ b/ext/boto/fps/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008, Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/ext/boto/fps/connection.py b/ext/boto/fps/connection.py new file mode 100644 index 0000000000..6dc90a248e --- /dev/null +++ b/ext/boto/fps/connection.py @@ -0,0 +1,395 @@ +# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ +# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import urllib +import uuid +from boto.connection import AWSQueryConnection +from boto.fps.exception import ResponseErrorFactory +from boto.fps.response import ResponseFactory +import boto.fps.response + +__all__ = ['FPSConnection'] + +decorated_attrs = ('action', 'response') + + +def add_attrs_from(func, to): + for attr in decorated_attrs: + setattr(to, attr, getattr(func, attr, None)) + return to + + +def complex_amounts(*fields): + def decorator(func): + def wrapper(self, *args, **kw): + for field in filter(kw.has_key, fields): + amount = kw.pop(field) + kw[field + '.Value'] = getattr(amount, 'Value', str(amount)) + kw[field + '.CurrencyCode'] = getattr(amount, 'CurrencyCode', + self.currencycode) + return func(self, *args, **kw) + wrapper.__doc__ = "{0}\nComplex Amounts: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda x: len(x) == len(filter(kw.has_key, x)) + if 1 != len(filter(hasgroup, groups)): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires {1} argument(s)" \ + "".format(getattr(func, 'action', 'Method'), message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def needs_caller_reference(func): + + def wrapper(*args, **kw): + kw.setdefault('CallerReference', uuid.uuid4()) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nUses CallerReference, defaults " \ + "to uuid.uuid4()".format(func.__doc__) + return add_attrs_from(func, to=wrapper) + + +def api_action(*api): + + def decorator(func): + action = ''.join(api or map(str.capitalize, func.__name__.split('_'))) + response = ResponseFactory(action) + if hasattr(boto.fps.response, action + 'Response'): + response = getattr(boto.fps.response, action + 'Response') + + def wrapper(self, *args, **kw): + return func(self, action, response, *args, **kw) + wrapper.action, wrapper.response = action, response + wrapper.__doc__ = "FPS {0} API call\n{1}".format(action, + func.__doc__) + return wrapper + return decorator + + +class FPSConnection(AWSQueryConnection): + + APIVersion = '2010-08-28' + ResponseError = ResponseErrorFactory + currencycode = 'USD' + + def __init__(self, *args, **kw): + self.currencycode = kw.pop('CurrencyCode', self.currencycode) + kw.setdefault('host', 'fps.sandbox.amazonaws.com') + super(FPSConnection, self).__init__(*args, **kw) + + def _required_auth_capability(self): + return ['fps'] + + @needs_caller_reference + @complex_amounts('SettlementAmount') + @requires(['CreditInstrumentId', 'SettlementAmount.Value', + 'SenderTokenId', 'SettlementAmount.CurrencyCode']) + @api_action() + def settle_debt(self, action, response, **kw): + """ + Allows a caller to initiate a transaction that atomically transfers + money from a sender's payment instrument to the recipient, while + decreasing corresponding debt balance. + """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def get_transaction_status(self, action, response, **kw): + """ + Gets the latest status of a transaction. + """ + return self.get_object(action, kw, response) + + @requires(['StartDate']) + @api_action() + def get_account_activity(self, action, response, **kw): + """ + Returns transactions for a given date range. 
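+
+        A minimal usage sketch (hypothetical credentials and date; FPS
+        must be enabled for the AWS account)::
+
+            from boto.fps.connection import FPSConnection
+
+            conn = FPSConnection('<access-key>', '<secret-key>')
+            response = conn.get_account_activity(StartDate='2012-01-01')
+            for transaction in response.GetAccountActivityResult.Transaction:
+                print(transaction)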
+ """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def get_transaction(self, action, response, **kw): + """ + Returns all details of a transaction. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_outstanding_debt_balance(self, action, response): + """ + Returns the total outstanding balance for all the credit instruments + for the given creditor account. + """ + return self.get_object(action, {}, response) + + @requires(['PrepaidInstrumentId']) + @api_action() + def get_prepaid_balance(self, action, response, **kw): + """ + Returns the balance available on the given prepaid instrument. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_total_prepaid_liability(self, action, response): + """ + Returns the total liability held by the given account corresponding to + all the prepaid instruments owned by the account. + """ + return self.get_object(action, {}, response) + + @api_action() + def get_account_balance(self, action, response): + """ + Returns the account balance for an account in real time. + """ + return self.get_object(action, {}, response) + + @needs_caller_reference + @requires(['PaymentInstruction', 'TokenType']) + @api_action() + def install_payment_instruction(self, action, response, **kw): + """ + Installs a payment instruction for caller. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @requires(['returnURL', 'pipelineName']) + def cbui_url(self, **kw): + """ + Generate a signed URL for the Co-Branded service API given arguments as + payload. + """ + sandbox = 'sandbox' in self.host and 'payments-sandbox' or 'payments' + endpoint = 'authorize.{0}.amazon.com'.format(sandbox) + base = '/cobranded-ui/actions/start' + + validpipelines = ('SingleUse', 'MultiUse', 'Recurring', 'Recipient', + 'SetupPrepaid', 'SetupPostpaid', 'EditToken') + assert kw['pipelineName'] in validpipelines, "Invalid pipelineName" + kw.update({ + 'signatureMethod': 'HmacSHA256', + 'signatureVersion': '2', + }) + kw.setdefault('callerKey', self.aws_access_key_id) + + safestr = lambda x: x is not None and str(x) or '' + safequote = lambda x: urllib.quote(safestr(x), safe='~') + payload = sorted([(k, safequote(v)) for k, v in kw.items()]) + + encoded = lambda p: '&'.join([k + '=' + v for k, v in p]) + canonical = '\n'.join(['GET', endpoint, base, encoded(payload)]) + signature = self._auth_handler.sign_string(canonical) + payload += [('signature', safequote(signature))] + payload.sort() + + return 'https://{0}{1}?{2}'.format(endpoint, base, encoded(payload)) + + @needs_caller_reference + @complex_amounts('TransactionAmount') + @requires(['SenderTokenId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def reserve(self, action, response, **kw): + """ + Reserve API is part of the Reserve and Settle API conjunction that + serve the purpose of a pay where the authorization and settlement have + a timing difference. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('TransactionAmount') + @requires(['SenderTokenId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def pay(self, action, response, **kw): + """ + Allows calling applications to move money from a sender to a recipient. 
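+
+        A minimal usage sketch (hypothetical sender token; the amount is
+        expanded by ``complex_amounts`` and CallerReference defaults to
+        a fresh UUID)::
+
+            from boto.fps.connection import FPSConnection
+
+            conn = FPSConnection('<access-key>', '<secret-key>')
+            response = conn.pay(SenderTokenId='<sender-token-id>',
+                                TransactionAmount=3.14)
+            transaction_id = response.PayResult.TransactionId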
+ """ + return self.get_object(action, kw, response) + + @requires(['TransactionId']) + @api_action() + def cancel(self, action, response, **kw): + """ + Cancels an ongoing transaction and puts it in cancelled state. + """ + return self.get_object(action, kw, response) + + @complex_amounts('TransactionAmount') + @requires(['ReserveTransactionId', 'TransactionAmount.Value', + 'TransactionAmount.CurrencyCode']) + @api_action() + def settle(self, action, response, **kw): + """ + The Settle API is used in conjunction with the Reserve API and is used + to settle previously reserved transaction. + """ + return self.get_object(action, kw, response) + + @complex_amounts('RefundAmount') + @requires(['TransactionId', 'RefundAmount.Value', + 'CallerReference', 'RefundAmount.CurrencyCode']) + @api_action() + def refund(self, action, response, **kw): + """ + Refunds a previously completed transaction. + """ + return self.get_object(action, kw, response) + + @requires(['RecipientTokenId']) + @api_action() + def get_recipient_verification_status(self, action, response, **kw): + """ + Returns the recipient status. + """ + return self.get_object(action, kw, response) + + @requires(['CallerReference'], ['TokenId']) + @api_action() + def get_token_by_caller(self, action, response, **kw): + """ + Returns the details of a particular token installed by this calling + application using the subway co-branded UI. + """ + return self.get_object(action, kw, response) + + @requires(['UrlEndPoint', 'HttpParameters']) + @api_action() + def verify_signature(self, action, response, **kw): + """ + Verify the signature that FPS sent in IPN or callback urls. + """ + return self.get_object(action, kw, response) + + @api_action() + def get_tokens(self, action, response, **kw): + """ + Returns a list of tokens installed on the given account. + """ + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def get_token_usage(self, action, response, **kw): + """ + Returns the usage of a token. + """ + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def cancel_token(self, action, response, **kw): + """ + Cancels any token installed by the calling application on its own + account. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('FundingAmount') + @requires(['PrepaidInstrumentId', 'FundingAmount.Value', + 'SenderTokenId', 'FundingAmount.CurrencyCode']) + @api_action() + def fund_prepaid(self, action, response, **kw): + """ + Funds the prepaid balance on the given prepaid instrument. + """ + return self.get_object(action, kw, response) + + @requires(['CreditInstrumentId']) + @api_action() + def get_debt_balance(self, action, response, **kw): + """ + Returns the balance corresponding to the given credit instrument. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('AdjustmentAmount') + @requires(['CreditInstrumentId', 'AdjustmentAmount.Value', + 'AdjustmentAmount.CurrencyCode']) + @api_action() + def write_off_debt(self, action, response, **kw): + """ + Allows a creditor to write off the debt balance accumulated partially + or fully at any time. + """ + return self.get_object(action, kw, response) + + @requires(['SubscriptionId']) + @api_action() + def get_transactions_for_subscription(self, action, response, **kw): + """ + Returns the transactions for a given subscriptionID. 
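+
+        A minimal sketch (hypothetical subscription id, given an
+        ``FPSConnection`` instance ``conn``)::
+
+            response = conn.get_transactions_for_subscription(
+                SubscriptionId='<subscription-id>')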
+ """ + return self.get_object(action, kw, response) + + @requires(['SubscriptionId']) + @api_action() + def get_subscription_details(self, action, response, **kw): + """ + Returns the details of Subscription for a given subscriptionID. + """ + return self.get_object(action, kw, response) + + @needs_caller_reference + @complex_amounts('RefundAmount') + @requires(['SubscriptionId']) + @api_action() + def cancel_subscription_and_refund(self, action, response, **kw): + """ + Cancels a subscription. + """ + message = "If you specify a RefundAmount, " \ + "you must specify CallerReference." + assert not 'RefundAmount.Value' in kw \ + or 'CallerReference' in kw, message + return self.get_object(action, kw, response) + + @requires(['TokenId']) + @api_action() + def get_payment_instruction(self, action, response, **kw): + """ + Gets the payment instruction of a token. + """ + return self.get_object(action, kw, response) diff --git a/ext/boto/fps/exception.py b/ext/boto/fps/exception.py new file mode 100644 index 0000000000..bebb86b787 --- /dev/null +++ b/ext/boto/fps/exception.py @@ -0,0 +1,344 @@ +from boto.exception import BotoServerError + + +class ResponseErrorFactory(BotoServerError): + + def __new__(cls, *args, **kw): + error = BotoServerError(*args, **kw) + newclass = globals().get(error.error_code, ResponseError) + obj = newclass.__new__(newclass, *args, **kw) + obj.__dict__.update(error.__dict__) + return obj + + +class ResponseError(BotoServerError): + """Undefined response error. + """ + retry = False + + def __repr__(self): + return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__, + self.status, self.reason, + self.error_message) + + def __str__(self): + return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ + '{2}\n' \ + '{0.error_message}'.format(self, + self.retry and '(Retriable)' or '', + self.__doc__.strip()) + + +class RetriableResponseError(ResponseError): + retry = True + + +class AccessFailure(RetriableResponseError): + """Account cannot be accessed. + """ + + +class AccountClosed(RetriableResponseError): + """Account is not active. + """ + + +class AccountLimitsExceeded(RetriableResponseError): + """The spending or receiving limit on the account is exceeded. + """ + + +class AmountOutOfRange(ResponseError): + """The transaction amount is more than the allowed range. + """ + + +class AuthFailure(RetriableResponseError): + """AWS was not able to validate the provided access credentials. + """ + + +class ConcurrentModification(RetriableResponseError): + """A retriable error can happen when two processes try to modify the + same data at the same time. + """ + + +class DuplicateRequest(ResponseError): + """A different request associated with this caller reference already + exists. + """ + + +class InactiveInstrument(ResponseError): + """Payment instrument is inactive. + """ + + +class IncompatibleTokens(ResponseError): + """The transaction could not be completed because the tokens have + incompatible payment instructions. + """ + + +class InstrumentAccessDenied(ResponseError): + """The external calling application is not the recipient for this + postpaid or prepaid instrument. + """ + + +class InstrumentExpired(ResponseError): + """The prepaid or the postpaid instrument has expired. + """ + + +class InsufficientBalance(RetriableResponseError): + """The sender, caller, or recipient's account balance has + insufficient funds to complete the transaction. 
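+
+    As with every :class:`RetriableResponseError` subclass, ``retry`` is
+    True, so a caller may catch it and retry the call, e.g. (illustrative
+    sketch, given an ``FPSConnection`` instance ``conn``)::
+
+        from boto.fps.exception import ResponseError
+
+        try:
+            response = conn.pay(SenderTokenId='<token-id>',
+                                TransactionAmount=1)
+        except ResponseError as error:
+            if error.retry:
+                pass  # back off, then retry the call
+            else:
+                raise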
+    """
+
+
+class InternalError(RetriableResponseError):
+    """A retriable error that happens due to some transient problem in
+    the system.
+    """
+
+
+class InvalidAccountState(RetriableResponseError):
+    """The account is either suspended or closed.
+    """
+
+
+class InvalidAccountState_Caller(RetriableResponseError):
+    """The developer account cannot participate in the transaction.
+    """
+
+
+class InvalidAccountState_Recipient(RetriableResponseError):
+    """Recipient account cannot participate in the transaction.
+    """
+
+
+class InvalidAccountState_Sender(RetriableResponseError):
+    """Sender account cannot participate in the transaction.
+    """
+
+
+class InvalidCallerReference(ResponseError):
+    """The Caller Reference does not have a token associated with it.
+    """
+
+
+class InvalidClientTokenId(ResponseError):
+    """The AWS Access Key Id you provided does not exist in our records.
+    """
+
+
+class InvalidDateRange(ResponseError):
+    """The end date specified is before the start date or the start date
+    is in the future.
+    """
+
+
+class InvalidParams(ResponseError):
+    """One or more parameters in the request is invalid.
+    """
+
+
+class InvalidPaymentInstrument(ResponseError):
+    """The payment method used in the transaction is invalid.
+    """
+
+
+class InvalidPaymentMethod(ResponseError):
+    """Specify correct payment method.
+    """
+
+
+class InvalidRecipientForCCTransaction(ResponseError):
+    """This account cannot receive credit card payments.
+    """
+
+
+class InvalidSenderRoleForAccountType(ResponseError):
+    """This token cannot be used for this operation.
+    """
+
+
+class InvalidTokenId(ResponseError):
+    """You did not install the token that you are trying to cancel.
+    """
+
+
+class InvalidTokenId_Recipient(ResponseError):
+    """The recipient token specified is either invalid or canceled.
+    """
+
+
+class InvalidTokenId_Sender(ResponseError):
+    """The sender token specified is either invalid or canceled or the
+    token is not active.
+    """
+
+
+class InvalidTokenType(ResponseError):
+    """An invalid operation was performed on the token, for example,
+    getting the token usage information on a single use token.
+    """
+
+
+class InvalidTransactionId(ResponseError):
+    """The specified transaction could not be found or the caller did not
+    execute the transaction or this is not a Pay or Reserve call.
+    """
+
+
+class InvalidTransactionState(ResponseError):
+    """The transaction is not complete, or it has temporarily failed.
+    """
+
+
+class NotMarketplaceApp(RetriableResponseError):
+    """This is not a marketplace application or the caller does not
+    match either the sender or the recipient.
+    """
+
+
+class OriginalTransactionFailed(ResponseError):
+    """The original transaction has failed.
+    """
+
+
+class OriginalTransactionIncomplete(RetriableResponseError):
+    """The original transaction is still in progress.
+    """
+
+
+class PaymentInstrumentNotCC(ResponseError):
+    """The payment method specified in the transaction is not a credit
+    card. You can only use a credit card for this transaction.
+    """
+
+
+class PaymentMethodNotDefined(ResponseError):
+    """Payment method is not defined in the transaction.
+    """
+
+
+class PrepaidFundingLimitExceeded(RetriableResponseError):
+    """An attempt has been made to fund the prepaid instrument
+    at a level greater than its recharge limit.
+    """
+
+
+class RefundAmountExceeded(ResponseError):
+    """The refund amount is more than the refundable amount.
+    """
+
+
+class SameSenderAndRecipient(ResponseError):
+    """The sender and receiver are identical, which is not allowed.
+    """
+
+
+class SameTokenIdUsedMultipleTimes(ResponseError):
+    """This token is already used in earlier transactions.
+    """
+
+
+class SenderNotOriginalRecipient(ResponseError):
+    """The sender in the refund transaction is not
+    the recipient of the original transaction.
+    """
+
+
+class SettleAmountGreaterThanDebt(ResponseError):
+    """The amount being settled or written off is
+    greater than the current debt.
+    """
+
+
+class SettleAmountGreaterThanReserveAmount(ResponseError):
+    """The amount being settled is greater than the reserved amount.
+    """
+
+
+class SignatureDoesNotMatch(ResponseError):
+    """The request signature calculated by Amazon does not match the
+    signature you provided.
+    """
+
+
+class TokenAccessDenied(ResponseError):
+    """Permission to cancel the token is denied.
+    """
+
+
+class TokenNotActive(ResponseError):
+    """The token is canceled.
+    """
+
+
+class TokenNotActive_Recipient(ResponseError):
+    """The recipient token is canceled.
+    """
+
+
+class TokenNotActive_Sender(ResponseError):
+    """The sender token is canceled.
+    """
+
+
+class TokenUsageError(ResponseError):
+    """The token usage limit is exceeded.
+    """
+
+
+class TransactionDenied(ResponseError):
+    """The transaction is not allowed.
+    """
+
+
+class TransactionFullyRefundedAlready(ResponseError):
+    """The transaction has already been completely refunded.
+    """
+
+
+class TransactionTypeNotRefundable(ResponseError):
+    """You cannot refund this transaction.
+    """
+
+
+class UnverifiedAccount_Recipient(ResponseError):
+    """The recipient's account must have a verified bank account or a
+    credit card before this transaction can be initiated.
+    """
+
+
+class UnverifiedAccount_Sender(ResponseError):
+    """The sender's account must have a verified U.S. credit card or
+    a verified U.S. bank account before this transaction can be
+    initiated.
+    """
+
+
+class UnverifiedBankAccount(ResponseError):
+    """A verified bank account should be used for this transaction.
+    """
+
+
+class UnverifiedEmailAddress_Caller(ResponseError):
+    """The caller account must have a verified email address.
+    """
+
+
+class UnverifiedEmailAddress_Recipient(ResponseError):
+    """The recipient account must have a verified
+    email address for receiving payments.
+    """
+
+
+class UnverifiedEmailAddress_Sender(ResponseError):
+    """The sender account must have a verified
+    email address for this payment.
+    """
diff --git a/ext/boto/fps/response.py b/ext/boto/fps/response.py
new file mode 100644
index 0000000000..c0a9e2837f
--- /dev/null
+++ b/ext/boto/fps/response.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
+# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
+# Copyright (c) 2008 Chris Moyer http://coredumped.org/
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from decimal import Decimal +from boto.compat import filter, map + + +def ResponseFactory(action): + class FPSResponse(Response): + _action = action + _Result = globals().get(action + 'Result', ResponseElement) + + # due to nodes receiving their closing tags + def endElement(self, name, value, connection): + if name != action + 'Response': + super(FPSResponse, self).endElement(name, value, connection) + return FPSResponse + + +class ResponseElement(object): + def __init__(self, connection=None, name=None): + if connection is not None: + self._connection = connection + self._name = name or self.__class__.__name__ + + @property + def connection(self): + return self._connection + + def __repr__(self): + render = lambda pair: '{!s}: {!r}'.format(*pair) + do_show = lambda pair: not pair[0].startswith('_') + attrs = filter(do_show, self.__dict__.items()) + return '{0}({1})'.format(self.__class__.__name__, + ', '.join(map(render, attrs))) + + def startElement(self, name, attrs, connection): + return None + + # due to nodes receiving their closing tags + def endElement(self, name, value, connection): + if name != self._name: + setattr(self, name, value) + + +class Response(ResponseElement): + _action = 'Undefined' + + def startElement(self, name, attrs, connection): + if name == 'ResponseMetadata': + setattr(self, name, ResponseElement(name=name)) + elif name == self._action + 'Result': + setattr(self, name, self._Result(name=name)) + else: + return super(Response, self).startElement(name, attrs, connection) + return getattr(self, name) + + +class ComplexAmount(ResponseElement): + def __repr__(self): + return '{0} {1}'.format(self.CurrencyCode, self.Value) + + def __float__(self): + return float(self.Value) + + def __str__(self): + return str(self.Value) + + def startElement(self, name, attrs, connection): + if name not in ('CurrencyCode', 'Value'): + message = 'Unrecognized tag {0} in ComplexAmount'.format(name) + raise AssertionError(message) + return super(ComplexAmount, self).startElement(name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Value': + value = Decimal(value) + super(ComplexAmount, self).endElement(name, value, connection) + + +class AmountCollection(ResponseElement): + def startElement(self, name, attrs, connection): + setattr(self, name, ComplexAmount(name=name)) + return getattr(self, name) + + +class AccountBalance(AmountCollection): + def startElement(self, name, attrs, connection): + if name == 'AvailableBalances': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(AccountBalance, self).startElement(name, attrs, connection) + + +class GetAccountBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'AccountBalance': + setattr(self, name, AccountBalance(name=name)) + return getattr(self, name) + return super(GetAccountBalanceResult, self).startElement(name, attrs, + connection) + + +class GetTotalPrepaidLiabilityResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name 
== 'OutstandingPrepaidLiability': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetTotalPrepaidLiabilityResult, self).startElement(name, + attrs, connection) + + +class GetPrepaidBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'PrepaidBalance': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetPrepaidBalanceResult, self).startElement(name, attrs, + connection) + + +class GetOutstandingDebtBalanceResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'OutstandingDebt': + setattr(self, name, AmountCollection(name=name)) + return getattr(self, name) + return super(GetOutstandingDebtBalanceResult, self).startElement(name, + attrs, connection) + + +class TransactionPart(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'FeesPaid': + setattr(self, name, ComplexAmount(name=name)) + return getattr(self, name) + return super(TransactionPart, self).startElement(name, attrs, + connection) + + +class Transaction(ResponseElement): + def __init__(self, *args, **kw): + self.TransactionPart = [] + super(Transaction, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'TransactionPart': + getattr(self, name).append(TransactionPart(name=name)) + return getattr(self, name)[-1] + if name in ('TransactionAmount', 'FPSFees', 'Balance'): + setattr(self, name, ComplexAmount(name=name)) + return getattr(self, name) + return super(Transaction, self).startElement(name, attrs, connection) + + +class GetAccountActivityResult(ResponseElement): + def __init__(self, *args, **kw): + self.Transaction = [] + super(GetAccountActivityResult, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'Transaction': + getattr(self, name).append(Transaction(name=name)) + return getattr(self, name)[-1] + return super(GetAccountActivityResult, self).startElement(name, attrs, + connection) + + +class GetTransactionResult(ResponseElement): + def startElement(self, name, attrs, connection): + if name == 'Transaction': + setattr(self, name, Transaction(name=name)) + return getattr(self, name) + return super(GetTransactionResult, self).startElement(name, attrs, + connection) + + +class GetTokensResult(ResponseElement): + def __init__(self, *args, **kw): + self.Token = [] + super(GetTokensResult, self).__init__(*args, **kw) + + def startElement(self, name, attrs, connection): + if name == 'Token': + getattr(self, name).append(ResponseElement(name=name)) + return getattr(self, name)[-1] + return super(GetTokensResult, self).startElement(name, attrs, + connection) diff --git a/ext/boto/glacier/__init__.py b/ext/boto/glacier/__init__.py new file mode 100644 index 0000000000..8978b554cb --- /dev/null +++ b/ext/boto/glacier/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Glacier service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.glacier.layer2 import Layer2 + return get_regions('glacier', connection_cls=Layer2) + + +def connect_to_region(region_name, **kw_params): + from boto.glacier.layer2 import Layer2 + return connect('glacier', region_name, connection_cls=Layer2, **kw_params) diff --git a/ext/boto/glacier/concurrent.py b/ext/boto/glacier/concurrent.py new file mode 100644 index 0000000000..a4f3a224a0 --- /dev/null +++ b/ext/boto/glacier/concurrent.py @@ -0,0 +1,425 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+import os
+import math
+import threading
+import hashlib
+import time
+import logging
+from boto.compat import Queue
+# Empty is raised by Queue.get(timeout=...) in TransferThread.run() below.
+from six.moves.queue import Empty
+import binascii
+
+from boto.glacier.utils import DEFAULT_PART_SIZE, minimum_part_size, \
+    chunk_hashes, tree_hash, bytes_to_hex
+from boto.glacier.exceptions import UploadArchiveError, \
+    DownloadArchiveError, \
+    TreeHashDoesNotMatchError
+
+
+_END_SENTINEL = object()
+log = logging.getLogger('boto.glacier.concurrent')
+
+
+class ConcurrentTransferer(object):
+    def __init__(self, part_size=DEFAULT_PART_SIZE, num_threads=10):
+        self._part_size = part_size
+        self._num_threads = num_threads
+        self._threads = []
+
+    def _calculate_required_part_size(self, total_size):
+        min_part_size_required = minimum_part_size(total_size)
+        if self._part_size >= min_part_size_required:
+            part_size = self._part_size
+        else:
+            part_size = min_part_size_required
+            log.debug("The part size specified (%s) is smaller than "
+                      "the minimum required part size.  Using a part "
+                      "size of: %s", self._part_size, part_size)
+        total_parts = int(math.ceil(total_size / float(part_size)))
+        return total_parts, part_size
+
+    def _shutdown_threads(self):
+        log.debug("Shutting down threads.")
+        for thread in self._threads:
+            thread.should_continue = False
+        for thread in self._threads:
+            thread.join()
+        log.debug("Threads have exited.")
+
+    def _add_work_items_to_queue(self, total_parts, worker_queue, part_size):
+        log.debug("Adding work items to queue.")
+        for i in range(total_parts):
+            worker_queue.put((i, part_size))
+        for i in range(self._num_threads):
+            worker_queue.put(_END_SENTINEL)
+
+
+class ConcurrentUploader(ConcurrentTransferer):
+    """Concurrently upload an archive to glacier.
+
+    This class uses a thread pool to concurrently upload an archive
+    to glacier using the multipart upload API.
+
+    The threadpool is completely managed by this class and is
+    transparent to the users of this class.
+
+    """
+    def __init__(self, api, vault_name, part_size=DEFAULT_PART_SIZE,
+                 num_threads=10):
+        """
+        :type api: :class:`boto.glacier.layer1.Layer1`
+        :param api: A layer1 glacier object.
+
+        :type vault_name: str
+        :param vault_name: The name of the vault.
+
+        :type part_size: int
+        :param part_size: The size, in bytes, of the chunks to use when
+            uploading the archive parts.  The part size must be a megabyte
+            multiplied by a power of two.
+
+        :type num_threads: int
+        :param num_threads: The number of threads to spawn for the thread
+            pool.  The number of threads controls how many parts are
+            uploaded concurrently.
+
+        """
+        super(ConcurrentUploader, self).__init__(part_size, num_threads)
+        self._api = api
+        self._vault_name = vault_name
+
+    def upload(self, filename, description=None):
+        """Concurrently create an archive.
+
+        The part_size value specified when the class was constructed
+        will be used *unless* it is smaller than the minimum required
+        part size needed for the size of the given file.  In that case,
+        the part size used will be the minimum part size required
+        to properly upload the given file.
+
+        :type filename: str
+        :param filename: The name of the file to upload.
+
+        :type description: str
+        :param description: The description of the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive.
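+
+        A minimal usage sketch (hypothetical vault name and filename;
+        assumes valid Layer1 credentials)::
+
+            from boto.glacier.layer1 import Layer1
+            from boto.glacier.concurrent import ConcurrentUploader
+
+            api = Layer1(aws_access_key_id='<key>',
+                         aws_secret_access_key='<secret>')
+            uploader = ConcurrentUploader(api, '<vault-name>',
+                                          num_threads=4)
+            archive_id = uploader.upload('backup.tar.gz',
+                                         description='nightly backup')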
+
+        """
+        total_size = os.stat(filename).st_size
+        total_parts, part_size = self._calculate_required_part_size(total_size)
+        hash_chunks = [None] * total_parts
+        worker_queue = Queue()
+        result_queue = Queue()
+        response = self._api.initiate_multipart_upload(self._vault_name,
+                                                       part_size,
+                                                       description)
+        upload_id = response['UploadId']
+        # The basic idea is to add the chunks (the offsets not the actual
+        # contents) to a work queue, start up a thread pool, let them crank
+        # through the items in the work queue, and then place their results
+        # in a result queue which we use to complete the multipart upload.
+        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
+        self._start_upload_threads(result_queue, upload_id,
+                                   worker_queue, filename)
+        try:
+            self._wait_for_upload_threads(hash_chunks, result_queue,
+                                          total_parts)
+        except UploadArchiveError as e:
+            log.debug("An error occurred while uploading an archive, "
+                      "aborting multipart upload.")
+            self._api.abort_multipart_upload(self._vault_name, upload_id)
+            raise e
+        log.debug("Completing upload.")
+        response = self._api.complete_multipart_upload(
+            self._vault_name, upload_id, bytes_to_hex(tree_hash(hash_chunks)),
+            total_size)
+        log.debug("Upload finished.")
+        return response['ArchiveId']
+
+    def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
+        for _ in range(total_parts):
+            result = result_queue.get()
+            if isinstance(result, Exception):
+                log.debug("An error was found in the result queue, terminating "
+                          "threads: %s", result)
+                self._shutdown_threads()
+                raise UploadArchiveError("An error occurred while uploading "
+                                         "an archive: %s" % result)
+            # Each unit of work returns the tree hash for the given part
+            # number, which we use at the end to compute the tree hash of
+            # the entire archive.
+            part_number, tree_sha256 = result
+            hash_chunks[part_number] = tree_sha256
+        self._shutdown_threads()
+
+    def _start_upload_threads(self, result_queue, upload_id, worker_queue,
+                              filename):
+        log.debug("Starting threads.")
+        for _ in range(self._num_threads):
+            thread = UploadWorkerThread(self._api, self._vault_name, filename,
                                        upload_id, worker_queue, result_queue)
+            time.sleep(0.2)
+            thread.start()
+            self._threads.append(thread)
+
+
+class TransferThread(threading.Thread):
+    def __init__(self, worker_queue, result_queue):
+        super(TransferThread, self).__init__()
+        self._worker_queue = worker_queue
+        self._result_queue = result_queue
+        # This value can be set externally by other objects
+        # to indicate that the thread should be shut down.
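+        # (A shutdown sketch: a controller sets thread.should_continue to
+        # False and then calls thread.join(), as _shutdown_threads() does,
+        # which ends run() after at most the one-second queue timeout.)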
+        self.should_continue = True
+
+    def run(self):
+        while self.should_continue:
+            try:
+                work = self._worker_queue.get(timeout=1)
+            except Empty:
+                continue
+            if work is _END_SENTINEL:
+                self._cleanup()
+                return
+            result = self._process_chunk(work)
+            self._result_queue.put(result)
+        self._cleanup()
+
+    def _process_chunk(self, work):
+        pass
+
+    def _cleanup(self):
+        pass
+
+
+class UploadWorkerThread(TransferThread):
+    def __init__(self, api, vault_name, filename, upload_id,
+                 worker_queue, result_queue, num_retries=5,
+                 time_between_retries=5,
+                 retry_exceptions=Exception):
+        super(UploadWorkerThread, self).__init__(worker_queue, result_queue)
+        self._api = api
+        self._vault_name = vault_name
+        self._filename = filename
+        self._fileobj = open(filename, 'rb')
+        self._upload_id = upload_id
+        self._num_retries = num_retries
+        self._time_between_retries = time_between_retries
+        self._retry_exceptions = retry_exceptions
+
+    def _process_chunk(self, work):
+        result = None
+        for i in range(self._num_retries + 1):
+            try:
+                result = self._upload_chunk(work)
+                break
+            except self._retry_exceptions as e:
+                log.error("Exception caught uploading part number %s for "
+                          "vault %s, attempt: (%s / %s), filename: %s, "
+                          "exception: %s, msg: %s",
+                          work[0], self._vault_name, i + 1, self._num_retries + 1,
+                          self._filename, e.__class__, e)
+                time.sleep(self._time_between_retries)
+                result = e
+        return result
+
+    def _upload_chunk(self, work):
+        part_number, part_size = work
+        start_byte = part_number * part_size
+        self._fileobj.seek(start_byte)
+        contents = self._fileobj.read(part_size)
+        linear_hash = hashlib.sha256(contents).hexdigest()
+        tree_hash_bytes = tree_hash(chunk_hashes(contents))
+        byte_range = (start_byte, start_byte + len(contents) - 1)
+        log.debug("Uploading chunk %s of size %s", part_number, part_size)
+        response = self._api.upload_part(self._vault_name, self._upload_id,
+                                         linear_hash,
+                                         bytes_to_hex(tree_hash_bytes),
+                                         byte_range, contents)
+        # Reading the response allows the connection to be reused.
+        response.read()
+        return (part_number, tree_hash_bytes)
+
+    def _cleanup(self):
+        self._fileobj.close()
+
+
+class ConcurrentDownloader(ConcurrentTransferer):
+    """
+    Concurrently download an archive from glacier.
+
+    This class uses a thread pool to concurrently download an archive
+    from glacier.
+
+    The threadpool is completely managed by this class and is
+    transparent to the users of this class.
+
+    """
+    def __init__(self, job, part_size=DEFAULT_PART_SIZE,
+                 num_threads=10):
+        """
+        :param job: A layer2 Job object for an archive retrieval.
+
+        :param part_size: The size, in bytes, of the chunks to use when
+            downloading the archive parts.  The part size must be a megabyte
+            multiplied by a power of two.
+
+        """
+        super(ConcurrentDownloader, self).__init__(part_size, num_threads)
+        self._job = job
+
+    def download(self, filename):
+        """
+        Concurrently download an archive.
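+
+        A minimal usage sketch (hypothetical vault and job ids; the job
+        must be a completed archive-retrieval job)::
+
+            from boto.glacier.layer2 import Layer2
+
+            layer2 = Layer2(aws_access_key_id='<key>',
+                            aws_secret_access_key='<secret>')
+            job = layer2.get_vault('<vault-name>').get_job('<job-id>')
+            ConcurrentDownloader(job).download('archive.tar')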
+
+        :param filename: The filename to download the archive to
+        :type filename: str
+
+        """
+        total_size = self._job.archive_size
+        total_parts, part_size = self._calculate_required_part_size(total_size)
+        worker_queue = Queue()
+        result_queue = Queue()
+        self._add_work_items_to_queue(total_parts, worker_queue, part_size)
+        self._start_download_threads(result_queue, worker_queue)
+        try:
+            self._wait_for_download_threads(filename, result_queue, total_parts)
+        except DownloadArchiveError as e:
+            log.debug("An error occurred while downloading an archive: %s", e)
+            raise e
+        log.debug("Download completed.")
+
+    def _wait_for_download_threads(self, filename, result_queue, total_parts):
+        """
+        Waits until the result_queue is filled with all the downloaded parts.
+        This indicates that all part downloads have completed.
+
+        Saves downloaded parts into filename.
+
+        :param filename:
+        :param result_queue:
+        :param total_parts:
+        """
+        hash_chunks = [None] * total_parts
+        with open(filename, "wb") as f:
+            for _ in range(total_parts):
+                result = result_queue.get()
+                if isinstance(result, Exception):
+                    log.debug("An error was found in the result queue, "
+                              "terminating threads: %s", result)
+                    self._shutdown_threads()
+                    raise DownloadArchiveError(
+                        "An error occurred while downloading "
+                        "an archive: %s" % result)
+                part_number, part_size, actual_hash, data = result
+                hash_chunks[part_number] = actual_hash
+                start_byte = part_number * part_size
+                f.seek(start_byte)
+                f.write(data)
+                f.flush()
+        final_hash = bytes_to_hex(tree_hash(hash_chunks))
+        log.debug("Verifying final tree hash of archive, expecting: %s, "
+                  "actual: %s", self._job.sha256_treehash, final_hash)
+        if self._job.sha256_treehash != final_hash:
+            self._shutdown_threads()
+            raise TreeHashDoesNotMatchError(
+                "Tree hash for entire archive does not match, "
+                "expected: %s, got: %s" % (self._job.sha256_treehash,
+                                           final_hash))
+        self._shutdown_threads()
+
+    def _start_download_threads(self, result_queue, worker_queue):
+        log.debug("Starting threads.")
+        for _ in range(self._num_threads):
+            thread = DownloadWorkerThread(self._job, worker_queue, result_queue)
+            time.sleep(0.2)
+            thread.start()
+            self._threads.append(thread)
+
+
+class DownloadWorkerThread(TransferThread):
+    def __init__(self, job,
+                 worker_queue, result_queue,
+                 num_retries=5,
+                 time_between_retries=5,
+                 retry_exceptions=Exception):
+        """
+        Individual download thread that will download parts of the file
+        from Glacier.  Parts to download are stored in the work queue.
+
+        Downloaded parts are pushed onto the result queue as in-memory
+        byte strings, together with their tree hashes.
+
+        :param job: Glacier job object
+        :param worker_queue: A queue of tuples which include the part_number
+            and part_size
+        :param result_queue: A queue of tuples which include the
+            part_number, part size, binary tree hash and the part's
+            data.
+
+        """
+        super(DownloadWorkerThread, self).__init__(worker_queue, result_queue)
+        self._job = job
+        self._num_retries = num_retries
+        self._time_between_retries = time_between_retries
+        self._retry_exceptions = retry_exceptions
+
+    def _process_chunk(self, work):
+        """
+        Attempt to download a part of the archive from Glacier.
+        The result is returned so that run() can place it on the
+        result_queue.
+
+        :param work:
+        """
+        result = None
+        for _ in range(self._num_retries):
+            try:
+                result = self._download_chunk(work)
+                break
+            except self._retry_exceptions as e:
+                log.error("Exception caught downloading part number %s for "
+                          "job %s", work[0], self._job,)
+                time.sleep(self._time_between_retries)
+                result = e
+        return result
+
+    def _download_chunk(self, work):
+        """
+        Downloads a chunk of the archive from Glacier and returns the
+        part number, part size, binary tree hash, and the downloaded
+        bytes.
+
+        :param work:
+        """
+        part_number, part_size = work
+        start_byte = part_number * part_size
+        byte_range = (start_byte, start_byte + part_size - 1)
+        log.debug("Downloading chunk %s of size %s", part_number, part_size)
+        response = self._job.get_output(byte_range)
+        data = response.read()
+        actual_hash = bytes_to_hex(tree_hash(chunk_hashes(data)))
+        if response['TreeHash'] != actual_hash:
+            raise TreeHashDoesNotMatchError(
+                "Tree hash for part number %s does not match, "
+                "expected: %s, got: %s" % (part_number, response['TreeHash'],
+                                           actual_hash))
+        return (part_number, part_size, binascii.unhexlify(actual_hash), data)
diff --git a/ext/boto/glacier/exceptions.py b/ext/boto/glacier/exceptions.py
new file mode 100644
index 0000000000..c8bce1fe9b
--- /dev/null
+++ b/ext/boto/glacier/exceptions.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+# +from boto.compat import json + + +class UnexpectedHTTPResponseError(Exception): + def __init__(self, expected_responses, response): + self.status = response.status + self.body = response.read() + self.code = None + try: + body = json.loads(self.body) + self.code = body["code"] + msg = 'Expected %s, got ' % expected_responses + msg += '(%d, code=%s, message=%s)' % (response.status, + self.code, + body["message"]) + except Exception: + msg = 'Expected %s, got (%d, %s)' % (expected_responses, + response.status, + self.body) + super(UnexpectedHTTPResponseError, self).__init__(msg) + + +class ArchiveError(Exception): + pass + + +class UploadArchiveError(ArchiveError): + pass + + +class DownloadArchiveError(ArchiveError): + pass + + +class TreeHashDoesNotMatchError(ArchiveError): + pass diff --git a/ext/boto/glacier/job.py b/ext/boto/glacier/job.py new file mode 100644 index 0000000000..33e66a196c --- /dev/null +++ b/ext/boto/glacier/job.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import math +import socket + +from boto.glacier.exceptions import TreeHashDoesNotMatchError, \ + DownloadArchiveError +from boto.glacier.utils import tree_hash_from_str + + +class Job(object): + + DefaultPartSize = 4 * 1024 * 1024 + + ResponseDataElements = (('Action', 'action', None), + ('ArchiveId', 'archive_id', None), + ('ArchiveSizeInBytes', 'archive_size', 0), + ('Completed', 'completed', False), + ('CompletionDate', 'completion_date', None), + ('CreationDate', 'creation_date', None), + ('InventorySizeInBytes', 'inventory_size', 0), + ('JobDescription', 'description', None), + ('JobId', 'id', None), + ('SHA256TreeHash', 'sha256_treehash', None), + ('SNSTopic', 'sns_topic', None), + ('StatusCode', 'status_code', None), + ('StatusMessage', 'status_message', None), + ('VaultARN', 'arn', None)) + + def __init__(self, vault, response_data=None): + self.vault = vault + if response_data: + for response_name, attr_name, default in self.ResponseDataElements: + setattr(self, attr_name, response_data[response_name]) + else: + for response_name, attr_name, default in self.ResponseDataElements: + setattr(self, attr_name, default) + + def __repr__(self): + return 'Job(%s)' % self.arn + + def get_output(self, byte_range=None, validate_checksum=False): + """ + This operation downloads the output of the job. 
Depending on
+        the job type you specified when you initiated the job, the
+        output will be either the content of an archive or a vault
+        inventory.
+
+        You can download all the job output or download a portion of
+        the output by specifying a byte range. In the case of an
+        archive retrieval job, depending on the byte range you
+        specify, Amazon Glacier returns the checksum for the portion
+        of the data. You can compute the checksum on the client and
+        verify that the values match to ensure the portion you
+        downloaded is the correct data.
+
+        :type byte_range: tuple
+        :param byte_range: A tuple of integers specifying the slice (in
+            bytes) of the archive you want to receive
+
+        :type validate_checksum: bool
+        :param validate_checksum: Specify whether or not to validate
+            the associated tree hash.  If the response does not contain
+            a TreeHash, then no checksum will be verified.
+
+        """
+        response = self.vault.layer1.get_job_output(self.vault.name,
+                                                    self.id,
+                                                    byte_range)
+        if validate_checksum and 'TreeHash' in response:
+            data = response.read()
+            actual_tree_hash = tree_hash_from_str(data)
+            if response['TreeHash'] != actual_tree_hash:
+                raise TreeHashDoesNotMatchError(
+                    "The calculated tree hash %s does not match the "
+                    "expected tree hash %s for the byte range %s" % (
+                        actual_tree_hash, response['TreeHash'], byte_range))
+        return response
+
+    def _calc_num_chunks(self, chunk_size):
+        return int(math.ceil(self.archive_size / float(chunk_size)))
+
+    def download_to_file(self, filename, chunk_size=DefaultPartSize,
+                         verify_hashes=True, retry_exceptions=(socket.error,)):
+        """Download an archive to a file by name.
+
+        :type filename: str
+        :param filename: The name of the file where the archive
+            contents will be saved.
+
+        :type chunk_size: int
+        :param chunk_size: The chunk size to use when downloading
+            the archive.
+
+        :type verify_hashes: bool
+        :param verify_hashes: Indicates whether or not to verify
+            the tree hashes for each downloaded chunk.
+
+        """
+        num_chunks = self._calc_num_chunks(chunk_size)
+        with open(filename, 'wb') as output_file:
+            self._download_to_fileob(output_file, num_chunks, chunk_size,
+                                     verify_hashes, retry_exceptions)
+
+    def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
+                            verify_hashes=True,
+                            retry_exceptions=(socket.error,)):
+        """Download an archive to a file object.
+
+        :type output_file: file
+        :param output_file: The file object where the archive
+            contents will be saved.
+
+        :type chunk_size: int
+        :param chunk_size: The chunk size to use when downloading
+            the archive.
+
+        :type verify_hashes: bool
+        :param verify_hashes: Indicates whether or not to verify
+            the tree hashes for each downloaded chunk.
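+
+        A minimal sketch (hypothetical ``job`` object; any writable
+        binary file-like object will do)::
+
+            import io
+
+            buf = io.BytesIO()
+            job.download_to_fileobj(buf)
+            data = buf.getvalue()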
+
+        """
+        num_chunks = self._calc_num_chunks(chunk_size)
+        self._download_to_fileob(output_file, num_chunks, chunk_size,
+                                 verify_hashes, retry_exceptions)
+
+    def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
+                            retry_exceptions):
+        for i in range(num_chunks):
+            byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
+            data, expected_tree_hash = self._download_byte_range(
+                byte_range, retry_exceptions)
+            if verify_hashes:
+                actual_tree_hash = tree_hash_from_str(data)
+                if expected_tree_hash != actual_tree_hash:
+                    raise TreeHashDoesNotMatchError(
+                        "The calculated tree hash %s does not match the "
+                        "expected tree hash %s for the byte range %s" % (
+                            actual_tree_hash, expected_tree_hash, byte_range))
+            fileobj.write(data)
+
+    def _download_byte_range(self, byte_range, retry_exceptions):
+        # You can occasionally get socket.errors when downloading
+        # chunks from Glacier, so each chunk can be retried up
+        # to 5 times.
+        last_exception = None
+        for _ in range(5):
+            try:
+                response = self.get_output(byte_range)
+                data = response.read()
+                expected_tree_hash = response['TreeHash']
+                return data, expected_tree_hash
+            except retry_exceptions as e:
+                last_exception = e
+                continue
+        raise DownloadArchiveError("There was an error downloading "
+                                   "byte range %s: %s" % (byte_range,
+                                                          last_exception))
diff --git a/ext/boto/glacier/layer1.py b/ext/boto/glacier/layer1.py
new file mode 100644
index 0000000000..056d3b350a
--- /dev/null
+++ b/ext/boto/glacier/layer1.py
@@ -0,0 +1,1279 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import os
+
+import boto.glacier
+from boto.compat import json
+from boto.connection import AWSAuthConnection
+from boto.glacier.exceptions import UnexpectedHTTPResponseError
+from boto.glacier.response import GlacierResponse
+from boto.glacier.utils import ResettingFileSender
+
+
+class Layer1(AWSAuthConnection):
+    """
+    Amazon Glacier is a storage solution for "cold data."
+
+    Amazon Glacier is an extremely low-cost storage service that
+    provides secure, durable and easy-to-use storage for data backup
+    and archival. With Amazon Glacier, customers can store their data
+    cost effectively for months, years, or decades.
Amazon Glacier + also enables customers to offload the administrative burdens of + operating and scaling storage to AWS, so they don't have to worry + about capacity planning, hardware provisioning, data replication, + hardware failure and recovery, or time-consuming hardware + migrations. + + Amazon Glacier is a great storage choice when low storage cost is + paramount, your data is rarely retrieved, and retrieval latency of + several hours is acceptable. If your application requires fast or + frequent access to your data, consider using Amazon S3. For more + information, go to `Amazon Simple Storage Service (Amazon S3)`_. + + You can store any kind of data in any format. There is no maximum + limit on the total amount of data you can store in Amazon Glacier. + + If you are a first-time user of Amazon Glacier, we recommend that + you begin by reading the following sections in the Amazon Glacier + Developer Guide : + + + + `What is Amazon Glacier`_ - This section of the Developer Guide + describes the underlying data model, the operations it supports, + and the AWS SDKs that you can use to interact with the service. + + `Getting Started with Amazon Glacier`_ - The Getting Started + section walks you through the process of creating a vault, + uploading archives, creating jobs to download archives, retrieving + the job output, and deleting archives. + """ + Version = '2012-06-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + account_id='-', is_secure=True, port=None, + proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, path='/', + provider='aws', security_token=None, + suppress_consec_slashes=True, + region=None, region_name='us-east-1', + profile_name=None): + + if not region: + for reg in boto.glacier.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + self.account_id = account_id + super(Layer1, self).__init__(region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, debug, + https_connection_factory, + path, provider, security_token, + suppress_consec_slashes, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def make_request(self, verb, resource, headers=None, + data='', ok_responses=(200,), params=None, + sender=None, response_headers=None): + if headers is None: + headers = {} + headers['x-amz-glacier-version'] = self.Version + uri = '/%s/%s' % (self.account_id, resource) + response = super(Layer1, self).make_request(verb, uri, + params=params, + headers=headers, + sender=sender, + data=data) + if response.status in ok_responses: + return GlacierResponse(response, response_headers) + else: + # create glacier-specific exceptions + raise UnexpectedHTTPResponseError(ok_responses, response) + + # Vaults + + def list_vaults(self, limit=None, marker=None): + """ + This operation lists all vaults owned by the calling user's + account. The list returned in the response is ASCII-sorted by + vault name. + + By default, this operation returns up to 1,000 items. If there + are more vaults to list, the response `marker` field contains + the vault Amazon Resource Name (ARN) at which to continue the + list with a new List Vaults request; otherwise, the `marker` + field is `null`. To return a list of vaults that begins at a + specific vault, set the `marker` request parameter to the + vault ARN you obtained from a previous List Vaults request. 
+ You can also limit the number of vaults returned in the + response by specifying the `limit` parameter in the request. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `List + Vaults `_ in the Amazon Glacier Developer Guide . + + :type marker: string + :param marker: A string used for pagination. The marker specifies the + vault ARN after which the listing of vaults should begin. + + :type limit: string + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the List Vaults operation returns up to + 1,000 items. + """ + params = {} + if limit: + params['limit'] = limit + if marker: + params['marker'] = marker + return self.make_request('GET', 'vaults', params=params) + + def describe_vault(self, vault_name): + """ + This operation returns information about a vault, including + the vault's Amazon Resource Name (ARN), the date the vault was + created, the number of archives it contains, and the total + size of all the archives in the vault. The number of archives + and their total size are as of the last inventory generation. + This means that if you add or remove an archive from a vault, + and then immediately use Describe Vault, the change in + contents will not be immediately reflected. If you want to + retrieve the latest inventory of the vault, use InitiateJob. + Amazon Glacier generates vault inventories approximately + daily. For more information, see `Downloading a Vault + Inventory in Amazon Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Retrieving Vault Metadata in Amazon Glacier`_ and `Describe + Vault `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('GET', uri) + + def create_vault(self, vault_name): + """ + This operation creates a new vault with the specified name. + The name of the vault must be unique within a region for an + AWS account. You can create up to 1,000 vaults per account. If + you need to create more vaults, contact Amazon Glacier. + + You must use the following guidelines when naming a vault. + + + + + Names can be between 1 and 255 characters long. + + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' + (hyphen), and '.' (period). + + + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. 
+ + For conceptual information and underlying REST API, go to + `Creating a Vault in Amazon Glacier`_ and `Create Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('PUT', uri, ok_responses=(201,), + response_headers=[('Location', 'Location')]) + + def delete_vault(self, vault_name): + """ + This operation deletes a vault. Amazon Glacier will delete a + vault only if there are no archives in the vault as of the + last inventory and there have been no writes to the vault + since the last inventory. If either of these conditions is not + satisfied, the vault deletion fails (that is, the vault is not + removed) and Amazon Glacier returns an error. You can use + DescribeVault to return the number of archives in a vault, and + you can use `Initiate a Job (POST jobs)`_ to initiate a new + inventory retrieval for a vault. The inventory contains the + archive IDs you use to delete archives using `Delete Archive + (DELETE archive)`_. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting a Vault in Amazon Glacier`_ and `Delete Vault `_ in + the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s' % vault_name + return self.make_request('DELETE', uri, ok_responses=(204,)) + + def get_vault_notifications(self, vault_name): + """ + This operation retrieves the `notification-configuration` + subresource of the specified vault. + + For information about setting a notification configuration on + a vault, see SetVaultNotifications. If a notification + configuration for a vault is not set, the operation returns a + `404 Not Found` error. For more information about vault + notifications, see `Configuring Vault Notifications in Amazon + Glacier`_. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Configuring Vault Notifications in Amazon Glacier`_ and `Get + Vault Notification Configuration `_ in the Amazon Glacier + Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + """ + uri = 'vaults/%s/notification-configuration' % vault_name + return self.make_request('GET', uri) + + def set_vault_notifications(self, vault_name, notification_config): + """ + This operation configures notifications that will be sent when + specific events happen to a vault. By default, you don't get + any notifications. + + To configure vault notifications, send a PUT request to the + `notification-configuration` subresource of the vault. The + request should include a JSON document that provides an Amazon + SNS topic and specific events for which you want Amazon + Glacier to send notifications to the topic. 
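+
+ For example, a minimal configuration document (sketch only; the
+ topic ARN is illustrative) would be passed as:
+
+     layer1.set_vault_notifications('examplevault',
+         {'SNSTopic': 'arn:aws:sns:us-east-1:012345678901:mytopic',
+          'Events': ['ArchiveRetrievalCompleted',
+                     'InventoryRetrievalCompleted']})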
+
+ Amazon SNS topics must grant permission to the vault to be
+ allowed to publish notifications to the topic. You can
+ configure a vault to publish a notification for the following
+ vault events:
+
+
+ + **ArchiveRetrievalCompleted** This event occurs when a job
+ that was initiated for an archive retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+ + **InventoryRetrievalCompleted** This event occurs when a job
+ that was initiated for an inventory retrieval is completed
+ (InitiateJob). The status of the completed job can be
+ "Succeeded" or "Failed". The notification sent to the SNS
+ topic is the same output as returned from DescribeJob.
+
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and `Set
+ Vault Notification Configuration `_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type notification_config: dict
+ :param notification_config: Provides options for specifying
+ notification configuration.
+
+ The format of the dictionary is:
+
+ {'SNSTopic': 'mytopic',
+ 'Events': [event1,...]}
+ """
+ uri = 'vaults/%s/notification-configuration' % vault_name
+ json_config = json.dumps(notification_config)
+ return self.make_request('PUT', uri, data=json_config,
+ ok_responses=(204,))
+
+ def delete_vault_notifications(self, vault_name):
+ """
+ This operation deletes the notification configuration set for
+ a vault. The operation is eventually consistent; that is, it
+ might take some time for Amazon Glacier to completely disable
+ the notifications and you might still receive some
+ notifications for a short time after you send the delete
+ request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Configuring Vault Notifications in Amazon Glacier`_ and
+ `Delete Vault Notification Configuration `_ in the Amazon
+ Glacier Developer Guide.
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+ """
+ uri = 'vaults/%s/notification-configuration' % vault_name
+ return self.make_request('DELETE', uri, ok_responses=(204,))
+
+ # Jobs
+
+ def list_jobs(self, vault_name, completed=None, status_code=None,
+ limit=None, marker=None):
+ """
+ This operation lists jobs for a vault, including jobs that are
+ in-progress and jobs that have recently finished.
+
+
+ Amazon Glacier retains recently completed jobs for a period
+ before deleting them; however, it eventually removes completed
+ jobs. The output of completed jobs can be retrieved.
Retaining + completed jobs for a period of time after they have completed + enables you to get a job output in the event you miss the job + completion notification or your first attempt to download it + fails. For example, suppose you start an archive retrieval job + to download an archive. After the job completes, you start to + download the archive but encounter a network error. In this + scenario, you can retry and download the archive while the job + exists. + + + To retrieve an archive or retrieve a vault inventory from + Amazon Glacier, you first initiate a job, and after the job + completes, you download the data. For an archive retrieval, + the output is the archive data, and for an inventory + retrieval, it is the inventory list. The List Job operation + returns a list of these jobs sorted by job initiation time. + + This List Jobs operation supports pagination. By default, this + operation returns up to 1,000 jobs in the response. You should + always check the response for a `marker` at which to continue + the list; if there are no more items the `marker` is `null`. + To return a list of jobs that begins at a specific job, set + the `marker` request parameter to the value you obtained from + a previous List Jobs request. You can also limit the number of + jobs returned in the response by specifying the `limit` + parameter in the request. + + Additionally, you can filter the jobs list returned by + specifying an optional `statuscode` (InProgress, Succeeded, or + Failed) and `completed` (true, false) parameter. The + `statuscode` allows you to specify that only jobs that match a + specified status are returned. The `completed` parameter + allows you to specify that only jobs in a specific completion + state are returned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For the underlying REST API, go to `List Jobs `_ + + :type vault_name: string + :param vault_name: The name of the vault. + + :type limit: string + :param limit: Specifies that the response be limited to the specified + number of items or fewer. If not specified, the List Jobs operation + returns up to 1,000 jobs. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the job at which the listing of jobs should begin. Get + the marker value from a previous List Jobs response. You need only + include the marker if you are continuing the pagination of results + started in a previous List Jobs request. + + :type statuscode: string + :param statuscode: Specifies the type of job status to return. You can + specify the following values: "InProgress", "Succeeded", or + "Failed". + + :type completed: string + :param completed: Specifies the state of the jobs to return. You can + specify `True` or `False`. 
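+
+ A sketch of draining the paginated listing (``layer1`` stands in
+ for a ``Layer1`` instance; the response keys follow the Glacier
+ List Jobs REST output):
+
+     jobs = []
+     marker = None
+     while True:
+         response = layer1.list_jobs('examplevault', marker=marker)
+         jobs.extend(response['JobList'])
+         marker = response.get('Marker')
+         if not marker:
+             break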
+
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ if status_code:
+ params['statuscode'] = status_code
+ if completed is not None:
+ params['completed'] = 'true' if completed else 'false'
+ uri = 'vaults/%s/jobs' % vault_name
+ return self.make_request('GET', uri, params=params)
+
+ def describe_job(self, vault_name, job_id):
+ """
+ This operation returns information about a job you previously
+ initiated, including the job initiation date, the user who
+ initiated the job, the job status code/message and the Amazon
+ SNS topic to notify after Amazon Glacier completes the job.
+ For more information about initiating a job, see InitiateJob.
+
+
+ This operation enables you to check the status of your job.
+ However, it is strongly recommended that you set up an Amazon
+ SNS topic and specify it in your initiate job request so that
+ Amazon Glacier can notify the topic after it completes the
+ job.
+
+
+ A job ID will not expire for at least 24 hours after Amazon
+ Glacier completes the job.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For information about the underlying REST API, go to `Working
+ with Archives in Amazon Glacier`_ in the Amazon Glacier
+ Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type job_id: string
+ :param job_id: The ID of the job to describe.
+ """
+ uri = 'vaults/%s/jobs/%s' % (vault_name, job_id)
+ return self.make_request('GET', uri, ok_responses=(200,))
+
+ def initiate_job(self, vault_name, job_data):
+ """
+ This operation initiates a job of the specified type. In this
+ release, you can initiate a job to retrieve either an archive
+ or a vault inventory (a list of archives in a vault).
+
+ Retrieving data from Amazon Glacier is a two-step process:
+
+
+ #. Initiate a retrieval job.
+ #. After the job completes, download the bytes.
+
+
+ The retrieval request is executed asynchronously. When you
+ initiate a retrieval job, Amazon Glacier creates a job and
+ returns a job ID in the response. When Amazon Glacier
+ completes the job, you can get the job output (archive or
+ inventory data). For information about getting job output, see
+ the GetJobOutput operation.
+
+ The job must complete before you can get its output. To
+ determine when a job is complete, you have the following
+ options:
+
+
+ + **Use Amazon SNS Notification** You can specify an Amazon
+ Simple Notification Service (Amazon SNS) topic to which Amazon
+ Glacier can post a notification after the job is completed.
+ You can specify an SNS topic per job request. The notification
+ is sent only after Amazon Glacier completes the job. In
+ addition to specifying an SNS topic per job request, you can
+ configure vault notifications for a vault so that job
+ notifications are always sent. For more information, see
+ SetVaultNotifications.
+ + **Get job details** You can make a DescribeJob request to
+ obtain job status information while a job is in progress.
+ However, it is more efficient to use an Amazon SNS
+ notification to determine when a job is complete.
+
+
+
+ The information you get via notification is the same as what
+ you get by calling DescribeJob.
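+
+ As a sketch, a typical archive-retrieval call with this method
+ (names and the topic ARN are illustrative; the parameter
+ dictionary is documented below):
+
+     job_data = {'Type': 'archive-retrieval',
+                 'ArchiveId': archive_id,
+                 'SNSTopic': 'arn:aws:sns:us-east-1:012345678901:mytopic'}
+     response = layer1.initiate_job('examplevault', job_data)
+     job_id = response['JobId']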
+ + + If for a specific event, you add both the notification + configuration on the vault and also specify an SNS topic in + your initiate job request, Amazon Glacier sends both + notifications. For more information, see + SetVaultNotifications. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + **About the Vault Inventory** + + Amazon Glacier prepares an inventory for each vault + periodically, every 24 hours. When you initiate a job for a + vault inventory, Amazon Glacier returns the last inventory for + the vault. The inventory data you get might be up to a day or + two days old. Also, the initiate inventory job might take some + time to complete before you can download the vault inventory. + So you do not want to retrieve a vault inventory for each + vault operation. However, in some scenarios, you might find + the vault inventory useful. For example, when you upload an + archive, you can provide an archive description but not an + archive name. Amazon Glacier provides you a unique archive ID, + an opaque string of characters. So, you might maintain your + own database that maps archive names to their corresponding + Amazon Glacier assigned archive IDs. You might find the vault + inventory useful in the event you need to reconcile + information in your database with the actual vault inventory. + + **About Ranged Archive Retrieval** + + You can initiate an archive retrieval for the whole archive or + a range of the archive. In the case of ranged archive + retrieval, you specify a byte range to return or the whole + archive. The range specified must be megabyte (MB) aligned, + that is the range start value must be divisible by 1 MB and + range end value plus 1 must be divisible by 1 MB or equal the + end of the archive. If the ranged archive retrieval is not + megabyte aligned, this operation returns a 400 response. + Furthermore, to ensure you get checksum values for data you + download using Get Job Output API, the range must be tree hash + aligned. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Initiate a Job`_ and `Downloading a Vault Inventory`_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_parameters: dict + :param job_parameters: Provides options for specifying job information. + The dictionary can contain the following attributes: + + * ArchiveId - The ID of the archive you want to retrieve. + This field is required only if the Type is set to + archive-retrieval. + * Description - The optional description for the job. 
+ * Format - When initiating a job to retrieve a vault + inventory, you can optionally add this parameter to + specify the output format. Valid values are: CSV|JSON. + * SNSTopic - The Amazon SNS topic ARN where Amazon Glacier + sends a notification when the job is completed and the + output is ready for you to download. + * Type - The job type. Valid values are: + archive-retrieval|inventory-retrieval + * RetrievalByteRange - Optionally specify the range of + bytes to retrieve. + * InventoryRetrievalParameters: Optional job parameters + * Format - The output format, like "JSON" + * StartDate - ISO8601 starting date string + * EndDate - ISO8601 ending date string + * Limit - Maximum number of entries + * Marker - A unique string used for pagination + + """ + uri = 'vaults/%s/jobs' % vault_name + response_headers = [('x-amz-job-id', u'JobId'), + ('Location', u'Location')] + json_job_data = json.dumps(job_data) + return self.make_request('POST', uri, data=json_job_data, + ok_responses=(202,), + response_headers=response_headers) + + def get_job_output(self, vault_name, job_id, byte_range=None): + """ + This operation downloads the output of the job you initiated + using InitiateJob. Depending on the job type you specified + when you initiated the job, the output will be either the + content of an archive or a vault inventory. + + A job ID will not expire for at least 24 hours after Amazon + Glacier completes the job. That is, you can download the job + output within the 24 hours period after Amazon Glacier + completes the job. + + If the job output is large, then you can use the `Range` + request header to retrieve a portion of the output. This + allows you to download the entire output in smaller chunks of + bytes. For example, suppose you have 1 GB of job output you + want to download and you decide to download 128 MB chunks of + data at a time, which is a total of eight Get Job Output + requests. You use the following process to download the job + output: + + + #. Download a 128 MB chunk of output by specifying the + appropriate byte range using the `Range` header. + #. Along with the data, the response includes a checksum of + the payload. You compute the checksum of the payload on the + client and compare it with the checksum you received in the + response to ensure you received all the expected data. + #. Repeat steps 1 and 2 for all the eight 128 MB chunks of + output data, each time specifying the appropriate byte range. + #. After downloading all the parts of the job output, you have + a list of eight checksum values. Compute the tree hash of + these values to find the checksum of the entire output. Using + the Describe Job API, obtain job information of the job that + provided you the output. The response includes the checksum of + the entire archive stored in Amazon Glacier. You compare this + value with the checksum you computed to ensure you have + downloaded the entire archive content with no errors. + + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Downloading a Vault Inventory`_, `Downloading an Archive`_, + and `Get Job Output `_ + + :type account_id: string + :param account_id: The `AccountId` is the AWS Account ID. 
You can + specify either the AWS Account ID or optionally a '-', in which + case Amazon Glacier uses the AWS Account ID associated with the + credentials used to sign the request. If you specify your Account + ID, do not include hyphens in it. + + :type vault_name: string + :param vault_name: The name of the vault. + + :type job_id: string + :param job_id: The job ID whose data is downloaded. + + :type byte_range: string + :param byte_range: The range of bytes to retrieve from the output. For + example, if you want to download the first 1,048,576 bytes, specify + "Range: bytes=0-1048575". By default, this operation downloads the + entire output. + """ + response_headers = [('x-amz-sha256-tree-hash', u'TreeHash'), + ('Content-Range', u'ContentRange'), + ('Content-Type', u'ContentType')] + headers = None + if byte_range: + headers = {'Range': 'bytes=%d-%d' % byte_range} + uri = 'vaults/%s/jobs/%s/output' % (vault_name, job_id) + response = self.make_request('GET', uri, headers=headers, + ok_responses=(200, 206), + response_headers=response_headers) + return response + + # Archives + + def upload_archive(self, vault_name, archive, + linear_hash, tree_hash, description=None): + """ + This operation adds an archive to a vault. This is a + synchronous operation, and for a successful upload, your data + is durably persisted. Amazon Glacier returns the archive ID in + the `x-amz-archive-id` header of the response. + + You must use the archive ID to access your data in Amazon + Glacier. After you upload an archive, you should save the + archive ID returned so that you can retrieve or delete the + archive later. Besides saving the archive ID, you can also + index it and give it a friendly name to allow for better + searching. You can also use the optional archive description + field to specify how the archive is referred to in an external + index of archives, such as you might create in Amazon + DynamoDB. You can also get the vault inventory to obtain a + list of archive IDs in a vault. For more information, see + InitiateJob. + + You must provide a SHA256 tree hash of the data you are + uploading. For information about computing a SHA256 tree hash, + see `Computing Checksums`_. + + You can optionally specify an archive description of up to + 1,024 printable ASCII characters. You can get the archive + description when you either retrieve the archive or get the + vault inventory. For more information, see InitiateJob. Amazon + Glacier does not interpret the description in any way. An + archive description does not need to be unique. You cannot use + the description to retrieve or sort the archive list. + + Archives are immutable. After you upload an archive, you + cannot edit the archive or its description. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading an Archive in Amazon Glacier`_ and `Upload + Archive`_ in the Amazon Glacier Developer Guide . + + :type vault_name: str + :param vault_name: The name of the vault + + :type archive: bytes + :param archive: The data to upload. + + :type linear_hash: str + :param linear_hash: The SHA256 checksum (a linear hash) of the + payload. 
+ + :type tree_hash: str + :param tree_hash: The user-computed SHA256 tree hash of the + payload. For more information on computing the + tree hash, see http://goo.gl/u7chF. + + :type description: str + :param description: The optional description of the archive you + are uploading. + """ + response_headers = [('x-amz-archive-id', u'ArchiveId'), + ('Location', u'Location'), + ('x-amz-sha256-tree-hash', u'TreeHash')] + uri = 'vaults/%s/archives' % vault_name + try: + content_length = str(len(archive)) + except (TypeError, AttributeError): + # If a file like object is provided, try to retrieve + # the file size via fstat. + content_length = str(os.fstat(archive.fileno()).st_size) + headers = {'x-amz-content-sha256': linear_hash, + 'x-amz-sha256-tree-hash': tree_hash, + 'Content-Length': content_length} + if description: + headers['x-amz-archive-description'] = description + if self._is_file_like(archive): + sender = ResettingFileSender(archive) + else: + sender = None + return self.make_request('POST', uri, headers=headers, + sender=sender, + data=archive, ok_responses=(201,), + response_headers=response_headers) + + def _is_file_like(self, archive): + return hasattr(archive, 'seek') and hasattr(archive, 'tell') + + def delete_archive(self, vault_name, archive_id): + """ + This operation deletes an archive from a vault. Subsequent + requests to initiate a retrieval of this archive will fail. + Archive retrievals that are in progress for this archive ID + may or may not succeed according to the following scenarios: + + + + If the archive retrieval job is actively preparing the data + for download when Amazon Glacier receives the delete archive + request, the archival retrieval operation might fail. + + If the archive retrieval job has successfully prepared the + archive for download when Amazon Glacier receives the delete + archive request, you will be able to download the output. + + + This operation is idempotent. Attempting to delete an already- + deleted archive does not result in an error. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Deleting an Archive in Amazon Glacier`_ and `Delete Archive`_ + in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type archive_id: string + :param archive_id: The ID of the archive to delete. + """ + uri = 'vaults/%s/archives/%s' % (vault_name, archive_id) + return self.make_request('DELETE', uri, ok_responses=(204,)) + + # Multipart + + def initiate_multipart_upload(self, vault_name, part_size, + description=None): + """ + This operation initiates a multipart upload. Amazon Glacier + creates a multipart upload resource and returns its ID in the + response. The multipart upload ID is used in subsequent + requests to upload parts of an archive (see + UploadMultipartPart). + + When you initiate a multipart upload, you specify the part + size in number of bytes. The part size must be a megabyte + (1024 KB) multiplied by a power of 2-for example, 1048576 (1 + MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so + on. The minimum allowable part size is 1 MB, and the maximum + is 4 GB. 
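+
+ A quick way to validate a candidate part size before calling
+ this method (a sketch; the constraint is 1 MB multiplied by a
+ power of two, up to 4 GB):
+
+     mb = 1024 * 1024
+     multiple = part_size // mb
+     valid = (part_size % mb == 0 and 0 < multiple <= 4096
+              and multiple & (multiple - 1) == 0)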
+ + Every part you upload to this resource (see + UploadMultipartPart), except the last one, must have the same + size. The last one can be the same size or smaller. For + example, suppose you want to upload a 16.2 MB file. If you + initiate the multipart upload with a part size of 4 MB, you + will upload four parts of 4 MB each and one part of 0.2 MB. + + + You don't need to know the size of the archive when you start + a multipart upload because Amazon Glacier does not require you + to specify the overall archive size. + + + After you complete the multipart upload, Amazon Glacier + removes the multipart upload resource referenced by the ID. + Amazon Glacier also removes the multipart upload resource if + you cancel the multipart upload or it may be removed if there + is no activity for a period of 24 hours. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Uploading Large Archives in Parts (Multipart Upload)`_ and + `Initiate Multipart Upload`_ in the Amazon Glacier Developer + Guide . + + The part size must be a megabyte (1024 KB) multiplied by a power of + 2, for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), + 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, + and the maximum is 4 GB (4096 MB). + + :type vault_name: str + :param vault_name: The name of the vault. + + :type description: str + :param description: The archive description that you are uploading in + parts. + + :type part_size: int + :param part_size: The size of each part except the last, in bytes. The + last part can be smaller than this part size. + """ + response_headers = [('x-amz-multipart-upload-id', u'UploadId'), + ('Location', u'Location')] + headers = {'x-amz-part-size': str(part_size)} + if description: + headers['x-amz-archive-description'] = description + uri = 'vaults/%s/multipart-uploads' % vault_name + response = self.make_request('POST', uri, headers=headers, + ok_responses=(201,), + response_headers=response_headers) + return response + + def complete_multipart_upload(self, vault_name, upload_id, + sha256_treehash, archive_size): + """ + You call this operation to inform Amazon Glacier that all the + archive parts have been uploaded and that Amazon Glacier can + now assemble the archive from the uploaded parts. After + assembling and saving the archive to the vault, Amazon Glacier + returns the URI path of the newly created archive resource. + Using the URI path, you can then access the archive. After you + upload an archive, you should save the archive ID returned to + retrieve the archive at a later point. You can also get the + vault inventory to obtain a list of archive IDs in a vault. + For more information, see InitiateJob. + + In the request, you must include the computed SHA256 tree hash + of the entire archive you have uploaded. For information about + computing a SHA256 tree hash, see `Computing Checksums`_. On + the server side, Amazon Glacier also constructs the SHA256 + tree hash of the assembled archive. If the values match, + Amazon Glacier saves the archive to the vault; otherwise, it + returns an error, and the operation fails. 
The ListParts
+ operation returns a list of parts uploaded for a specific
+ multipart upload. It includes checksum information for each
+ uploaded part that can be used to debug a bad checksum issue.
+
+ Additionally, Amazon Glacier checks for any missing
+ content ranges when assembling the archive; if missing content
+ ranges are found, Amazon Glacier returns an error and the
+ operation fails.
+
+ Complete Multipart Upload is an idempotent operation. After
+ your first successful complete multipart upload, if you call
+ the operation again within a short period, the operation will
+ succeed and return the same archive ID. This is useful in the
+ event you experience a network issue that causes an aborted
+ connection or receive a 500 server error, in which case you
+ can repeat your Complete Multipart Upload request and get the
+ same archive ID without creating duplicate archives. Note,
+ however, that after the multipart upload completes, you cannot
+ call the List Parts operation and the multipart upload will
+ not appear in the List Multipart Uploads response, even if
+ idempotent complete is possible.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Complete Multipart Upload`_ in the Amazon Glacier Developer
+ Guide .
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type upload_id: str
+ :param upload_id: The upload ID of the multipart upload.
+
+ :type sha256_treehash: str
+ :param sha256_treehash: The SHA256 tree hash of the entire archive.
+ It is the tree hash of the SHA256 tree hashes of the individual
+ parts. If the value you specify in the request does not match the
+ SHA256 tree hash of the final assembled archive as computed by
+ Amazon Glacier, Amazon Glacier returns an error and the request
+ fails.
+
+ :type archive_size: int
+ :param archive_size: The total size, in bytes, of the entire
+ archive. This value should be the sum of all the sizes of
+ the individual parts that you uploaded.
+ """
+ response_headers = [('x-amz-archive-id', u'ArchiveId'),
+ ('Location', u'Location')]
+ headers = {'x-amz-sha256-tree-hash': sha256_treehash,
+ 'x-amz-archive-size': str(archive_size)}
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ response = self.make_request('POST', uri, headers=headers,
+ ok_responses=(201,),
+ response_headers=response_headers)
+ return response
+
+ def abort_multipart_upload(self, vault_name, upload_id):
+ """
+ This operation aborts a multipart upload identified by the
+ upload ID.
+
+ After the Abort Multipart Upload request succeeds, you cannot
+ upload any more parts to the multipart upload or complete the
+ multipart upload. Aborting a completed upload fails. However,
+ aborting an already-aborted upload will succeed, for a short
+ time.
For more information about uploading a part and + completing a multipart upload, see UploadMultipartPart and + CompleteMultipartUpload. + + This operation is idempotent. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `Abort + Multipart Upload`_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type upload_id: string + :param upload_id: The upload ID of the multipart upload to delete. + """ + uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id) + return self.make_request('DELETE', uri, ok_responses=(204,)) + + def list_multipart_uploads(self, vault_name, limit=None, marker=None): + """ + This operation lists in-progress multipart uploads for the + specified vault. An in-progress multipart upload is a + multipart upload that has been initiated by an + InitiateMultipartUpload request, but has not yet been + completed or aborted. The list returned in the List Multipart + Upload response has no guaranteed order. + + The List Multipart Uploads operation supports pagination. By + default, this operation returns up to 1,000 multipart uploads + in the response. You should always check the response for a + `marker` at which to continue the list; if there are no more + items the `marker` is `null`. To return a list of multipart + uploads that begins at a specific upload, set the `marker` + request parameter to the value you obtained from a previous + List Multipart Upload request. You can also limit the number + of uploads returned in the response by specifying the `limit` + parameter in the request. + + Note the difference between this operation and listing parts + (ListParts). The List Multipart Uploads operation lists all + multipart uploads for a vault and does not require a multipart + upload ID. The List Parts operation requires a multipart + upload ID since parts are associated with a single upload. + + An AWS account has full permission to perform all operations + (actions). However, AWS Identity and Access Management (IAM) + users don't have any permissions by default. You must grant + them explicit permission to perform specific actions. For more + information, see `Access Control Using AWS Identity and Access + Management (IAM)`_. + + For conceptual information and the underlying REST API, go to + `Working with Archives in Amazon Glacier`_ and `List Multipart + Uploads `_ in the Amazon Glacier Developer Guide . + + :type vault_name: string + :param vault_name: The name of the vault. + + :type limit: string + :param limit: Specifies the maximum number of uploads returned in the + response body. If this value is not specified, the List Uploads + operation returns up to 1,000 uploads. + + :type marker: string + :param marker: An opaque string used for pagination. This value + specifies the upload at which the listing of uploads should begin. + Get the marker value from a previous List Uploads response. You + need only include the marker if you are continuing the pagination + of results started in a previous List Uploads request. 
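+
+ For example, a sketch that walks every in-progress upload and
+ aborts it (``layer1`` stands in for a ``Layer1`` instance; the
+ response keys follow the Glacier List Multipart Uploads REST
+ output):
+
+     marker = None
+     while True:
+         response = layer1.list_multipart_uploads('examplevault',
+                                                  marker=marker)
+         for upload in response['UploadsList']:
+             layer1.abort_multipart_upload('examplevault',
+                                           upload['MultipartUploadId'])
+         marker = response.get('Marker')
+         if not marker:
+             break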
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ uri = 'vaults/%s/multipart-uploads' % vault_name
+ return self.make_request('GET', uri, params=params)
+
+ def list_parts(self, vault_name, upload_id, limit=None, marker=None):
+ """
+ This operation lists the parts of an archive that have been
+ uploaded in a specific multipart upload. You can make this
+ request at any time during an in-progress multipart upload
+ before you complete the upload (see CompleteMultipartUpload).
+ List Parts returns an error for completed uploads. The list
+ returned in the List Parts response is sorted by part range.
+
+ The List Parts operation supports pagination. By default, this
+ operation returns up to 1,000 uploaded parts in the response.
+ You should always check the response for a `marker` at which
+ to continue the list; if there are no more items the `marker`
+ is `null`. To return a list of parts that begins at a specific
+ part, set the `marker` request parameter to the value you
+ obtained from a previous List Parts request. You can also
+ limit the number of parts returned in the response by
+ specifying the `limit` parameter in the request.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and the underlying REST API, go to
+ `Working with Archives in Amazon Glacier`_ and `List Parts`_
+ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: string
+ :param vault_name: The name of the vault.
+
+ :type upload_id: string
+ :param upload_id: The upload ID of the multipart upload.
+
+ :type marker: string
+ :param marker: An opaque string used for pagination. This value
+ specifies the part at which the listing of parts should begin. Get
+ the marker value from a previous List Parts response. You need
+ only include the marker if you are continuing the pagination of
+ results started in a previous List Parts request.
+
+ :type limit: string
+ :param limit: Specifies the maximum number of parts returned in the
+ response body. If this value is not specified, the List Parts
+ operation returns up to 1,000 parts.
+ """
+ params = {}
+ if limit:
+ params['limit'] = limit
+ if marker:
+ params['marker'] = marker
+ uri = 'vaults/%s/multipart-uploads/%s' % (vault_name, upload_id)
+ return self.make_request('GET', uri, params=params)
+
+ def upload_part(self, vault_name, upload_id, linear_hash,
+ tree_hash, byte_range, part_data):
+ """
+ This operation uploads a part of an archive. You can upload
+ archive parts in any order. You can also upload them in
+ parallel. You can upload up to 10,000 parts for a multipart
+ upload.
+
+ Amazon Glacier rejects your upload part request if any of the
+ following conditions is true:
+
+
+ + **SHA256 tree hash does not match** To ensure that part data
+ is not corrupted in transmission, you compute a SHA256 tree
+ hash of the part and include it in your request. Upon
+ receiving the part data, Amazon Glacier also computes a SHA256
+ tree hash. If these hash values don't match, the operation
+ fails. For information about computing a SHA256 tree hash, see
+ `Computing Checksums`_.
+ + **Part size does not match** The size of each part except the
+ last must match the size specified in the corresponding
+ InitiateMultipartUpload request. The size of the last part
+ must be the same size as, or smaller than, the specified size.
+ If you upload a part whose size is smaller than the part size
+ you specified in your initiate multipart upload request and
+ that part is not the last part, then the upload part request
+ will succeed. However, the subsequent Complete Multipart
+ Upload request will fail.
+ + **Range does not align** The byte range value in the request
+ does not align with the part size specified in the
+ corresponding initiate request. For example, if you specify a
+ part size of 4194304 bytes (4 MB), then 0 to 4194303 bytes (4
+ MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) are valid
+ part ranges. However, if you set a range value of 2 MB to 6
+ MB, the range does not align with the part size and the upload
+ will fail.
+
+
+ This operation is idempotent. If you upload the same part
+ multiple times, the data included in the most recent request
+ overwrites the previously uploaded data.
+
+ An AWS account has full permission to perform all operations
+ (actions). However, AWS Identity and Access Management (IAM)
+ users don't have any permissions by default. You must grant
+ them explicit permission to perform specific actions. For more
+ information, see `Access Control Using AWS Identity and Access
+ Management (IAM)`_.
+
+ For conceptual information and underlying REST API, go to
+ `Uploading Large Archives in Parts (Multipart Upload)`_ and
+ `Upload Part `_ in the Amazon Glacier Developer Guide .
+
+ :type vault_name: str
+ :param vault_name: The name of the vault.
+
+ :type linear_hash: str
+ :param linear_hash: The SHA256 checksum (a linear hash) of the
+ payload.
+
+ :type tree_hash: str
+ :param tree_hash: The user-computed SHA256 tree hash of the
+ payload. For more information on computing the
+ tree hash, see http://goo.gl/u7chF.
+
+ :type upload_id: str
+ :param upload_id: The unique ID associated with this upload
+ operation.
+
+ :type byte_range: tuple of ints
+ :param byte_range: Identifies the range of bytes in the assembled
+ archive that will be uploaded in this part. Amazon Glacier uses
+ this information to assemble the archive in the proper sequence.
+ The format of this header follows RFC 2616. An example header is
+ Content-Range:bytes 0-4194303/*.
+
+ :type part_data: bytes
+ :param part_data: The data to be uploaded for the part
+ """
+ headers = {'x-amz-content-sha256': linear_hash,
+ 'x-amz-sha256-tree-hash': tree_hash,
+ 'Content-Range': 'bytes %d-%d/*' % byte_range}
+ response_headers = [('x-amz-sha256-tree-hash', u'TreeHash')]
+ uri = 'vaults/%s/multipart-uploads/%s' % (str(vault_name), upload_id)
+ return self.make_request('PUT', uri, headers=headers,
+ data=part_data, ok_responses=(204,),
+ response_headers=response_headers)
diff --git a/ext/boto/glacier/layer2.py b/ext/boto/glacier/layer2.py
new file mode 100644
index 0000000000..abc36199bb
--- /dev/null
+++ b/ext/boto/glacier/layer2.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.glacier.layer1 import Layer1
+from boto.glacier.vault import Vault
+
+
+class Layer2(object):
+ """
+ Provides a more pythonic and friendly interface to Glacier based on Layer1
+ """
+
+ def __init__(self, *args, **kwargs):
+ # Accept a passed in layer1, mainly to allow easier testing
+ if "layer1" in kwargs:
+ self.layer1 = kwargs["layer1"]
+ else:
+ self.layer1 = Layer1(*args, **kwargs)
+
+ def create_vault(self, name):
+ """Creates a vault.
+
+ :type name: str
+ :param name: The name of the vault
+
+ :rtype: :class:`boto.glacier.vault.Vault`
+ :return: A Vault object representing the vault.
+ """
+ self.layer1.create_vault(name)
+ return self.get_vault(name)
+
+ def delete_vault(self, name):
+ """Delete a vault.
+
+ This operation deletes a vault. Amazon Glacier will delete a
+ vault only if there are no archives in the vault as per the
+ last inventory and there have been no writes to the vault
+ since the last inventory. If either of these conditions is not
+ satisfied, the vault deletion fails (that is, the vault is not
+ removed) and Amazon Glacier returns an error.
+
+ This operation is idempotent; you can send the same request
+ multiple times, and it has no further effect after the first
+ time Amazon Glacier deletes the specified vault.
+
+ :type name: str
+ :param name: The name of the vault to delete.
+ """
+ return self.layer1.delete_vault(name)
+
+ def get_vault(self, name):
+ """
+ Get an object representing a named vault from Glacier. This
+ operation does not check if the vault actually exists.
+
+ :type name: str
+ :param name: The name of the vault
+
+ :rtype: :class:`boto.glacier.vault.Vault`
+ :return: A Vault object representing the vault.
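+
+ A sketch of typical Layer2 usage (credentials and names are
+ illustrative):
+
+     glacier = Layer2(aws_access_key_id='...',
+                      aws_secret_access_key='...')
+     vault = glacier.get_vault('examplevault')
+     archive_id = vault.upload_archive('/tmp/backup.tar')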
+ """ + response_data = self.layer1.describe_vault(name) + return Vault(self.layer1, response_data) + + def list_vaults(self): + """ + Return a list of all vaults associated with the account ID. + + :rtype: List of :class:`boto.glacier.vault.Vault` + :return: A list of Vault objects. + """ + vaults = [] + marker = None + while True: + response_data = self.layer1.list_vaults(marker=marker, limit=1000) + vaults.extend([Vault(self.layer1, rd) for rd in response_data['VaultList']]) + marker = response_data.get('Marker') + if not marker: + break + + return vaults diff --git a/ext/boto/glacier/response.py b/ext/boto/glacier/response.py new file mode 100644 index 0000000000..c7a2612c03 --- /dev/null +++ b/ext/boto/glacier/response.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.compat import json + + +class GlacierResponse(dict): + """ + Represents a response from Glacier layer1. It acts as a dictionary + containing the combined keys received via JSON in the body (if + supplied) and headers. + """ + def __init__(self, http_response, response_headers): + self.http_response = http_response + self.status = http_response.status + self[u'RequestId'] = http_response.getheader('x-amzn-requestid') + if response_headers: + for header_name, item_name in response_headers: + self[item_name] = http_response.getheader(header_name) + if http_response.status != 204: + if http_response.getheader('Content-Type') == 'application/json': + body = json.loads(http_response.read().decode('utf-8')) + self.update(body) + size = http_response.getheader('Content-Length', None) + if size is not None: + self.size = size + + def read(self, amt=None): + "Reads and returns the response body, or up to the next amt bytes." + return self.http_response.read(amt) diff --git a/ext/boto/glacier/utils.py b/ext/boto/glacier/utils.py new file mode 100644 index 0000000000..98847e3f16 --- /dev/null +++ b/ext/boto/glacier/utils.py @@ -0,0 +1,175 @@ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import hashlib
+import math
+import binascii
+
+from boto.compat import six
+
+
+_MEGABYTE = 1024 * 1024
+DEFAULT_PART_SIZE = 4 * _MEGABYTE
+MAXIMUM_NUMBER_OF_PARTS = 10000
+
+
+def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE):
+ """Calculate the minimum part size needed for a multipart upload.
+
+ Glacier allows a maximum of 10,000 parts per upload. It also
+ states that the maximum archive size is 10,000 * 4 GB, which means
+ the part size can range from 1 MB to 4 GB (provided it is 1 MB
+ multiplied by a power of 2).
+
+ This function will compute what the minimum part size must be in
+ order to upload a file of size ``size_in_bytes``.
+
+ It will first check if ``default_part_size`` is sufficient for
+ a part size given the ``size_in_bytes``. If this is not the case,
+ then the smallest part size that can accommodate a file of size
+ ``size_in_bytes`` will be returned.
+
+ If the file size is greater than the maximum allowed archive
+ size of 10,000 * 4 GB, a ``ValueError`` will be raised.
+
+ """
+ # The default part size (4 MB) will be too small for a very large
+ # archive, as there is a limit of 10,000 parts in a multipart upload.
+ # This puts the maximum allowed archive size with the default part size
+ # at 40,000 MB. We need to do a sanity check on the part size, and find
+ # one that works if the default is too small.
+ part_size = _MEGABYTE
+ if (default_part_size * MAXIMUM_NUMBER_OF_PARTS) < size_in_bytes:
+ if size_in_bytes > (4096 * _MEGABYTE * 10000):
+ raise ValueError("File size too large: %s" % size_in_bytes)
+ min_part_size = size_in_bytes / 10000
+ power = 3
+ while part_size < min_part_size:
+ part_size = math.ldexp(_MEGABYTE, power)
+ power += 1
+ part_size = int(part_size)
+ else:
+ part_size = default_part_size
+ return part_size
+
+
+def chunk_hashes(bytestring, chunk_size=_MEGABYTE):
+ chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
+ hashes = []
+ for i in range(chunk_count):
+ start = i * chunk_size
+ end = (i + 1) * chunk_size
+ hashes.append(hashlib.sha256(bytestring[start:end]).digest())
+ if not hashes:
+ return [hashlib.sha256(b'').digest()]
+ return hashes
+
+
+def tree_hash(fo):
+ """
+ Given a hash of each 1MB chunk (from chunk_hashes) this will hash
+ together adjacent hashes until it ends up with one big one. So a
+ tree of hashes.
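+
+ A minimal sketch of one call (an odd hash left over at the end
+ of a level is promoted unchanged to the next level):
+
+     import hashlib
+     h = [hashlib.sha256(('chunk%d' % i).encode()).digest()
+          for i in range(3)]
+     root = tree_hash(h)
+     # root == sha256(sha256(h[0] + h[1]).digest() + h[2]).digest()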
+ """ + hashes = [] + hashes.extend(fo) + while len(hashes) > 1: + new_hashes = [] + while True: + if len(hashes) > 1: + first = hashes.pop(0) + second = hashes.pop(0) + new_hashes.append(hashlib.sha256(first + second).digest()) + elif len(hashes) == 1: + only = hashes.pop(0) + new_hashes.append(only) + else: + break + hashes.extend(new_hashes) + return hashes[0] + + +def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024): + """Compute the linear and tree hash from a fileobj. + + This function will compute the linear/tree hash of a fileobj + in a single pass through the fileobj. + + :param fileobj: A file like object. + + :param chunk_size: The size of the chunks to use for the tree + hash. This is also the buffer size used to read from + `fileobj`. + + :rtype: tuple + :return: A tuple of (linear_hash, tree_hash). Both hashes + are returned in hex. + + """ + # Python 3+, not binary + if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode: + raise ValueError('File-like object must be opened in binary mode!') + + linear_hash = hashlib.sha256() + chunks = [] + chunk = fileobj.read(chunk_size) + while chunk: + # It's possible to get a file-like object that has no mode (checked + # above) and returns something other than bytes (e.g. str). So here + # we try to catch that and encode to bytes. + if not isinstance(chunk, bytes): + chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8') + linear_hash.update(chunk) + chunks.append(hashlib.sha256(chunk).digest()) + chunk = fileobj.read(chunk_size) + if not chunks: + chunks = [hashlib.sha256(b'').digest()] + return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks)) + + +def bytes_to_hex(str_as_bytes): + return binascii.hexlify(str_as_bytes) + + +def tree_hash_from_str(str_as_bytes): + """ + + :type str_as_bytes: str + :param str_as_bytes: The string for which to compute the tree hash. + + :rtype: str + :return: The computed tree hash, returned as hex. + + """ + return bytes_to_hex(tree_hash(chunk_hashes(str_as_bytes))) + + +class ResettingFileSender(object): + def __init__(self, archive): + self._archive = archive + self._starting_offset = archive.tell() + + def __call__(self, connection, method, path, body, headers): + try: + connection.request(method, path, self._archive, headers) + return connection.getresponse() + finally: + self._archive.seek(self._starting_offset) diff --git a/ext/boto/glacier/vault.py b/ext/boto/glacier/vault.py new file mode 100644 index 0000000000..45d276cadb --- /dev/null +++ b/ext/boto/glacier/vault.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# Copyright (c) 2012 Robie Basak +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import codecs
+from boto.glacier.exceptions import UploadArchiveError
+from boto.glacier.job import Job
+from boto.glacier.writer import compute_hashes_from_fileobj, \
+    resume_file_upload, Writer
+from boto.glacier.concurrent import ConcurrentUploader
+from boto.glacier.utils import minimum_part_size, DEFAULT_PART_SIZE
+import os.path
+
+
+_MEGABYTE = 1024 * 1024
+_GIGABYTE = 1024 * _MEGABYTE
+
+MAXIMUM_ARCHIVE_SIZE = 10000 * 4 * _GIGABYTE
+MAXIMUM_NUMBER_OF_PARTS = 10000
+
+
+class Vault(object):
+
+    DefaultPartSize = DEFAULT_PART_SIZE
+    SingleOperationThreshold = 100 * _MEGABYTE
+
+    ResponseDataElements = (('VaultName', 'name', None),
+                            ('VaultARN', 'arn', None),
+                            ('CreationDate', 'creation_date', None),
+                            ('LastInventoryDate', 'last_inventory_date', None),
+                            ('SizeInBytes', 'size', 0),
+                            ('NumberOfArchives', 'number_of_archives', 0))
+
+    def __init__(self, layer1, response_data=None):
+        self.layer1 = layer1
+        if response_data:
+            for response_name, attr_name, default in self.ResponseDataElements:
+                value = response_data[response_name]
+                setattr(self, attr_name, value)
+        else:
+            for response_name, attr_name, default in self.ResponseDataElements:
+                setattr(self, attr_name, default)
+
+    def __repr__(self):
+        return 'Vault("%s")' % self.arn
+
+    def delete(self):
+        """
+        Delete this vault. WARNING: this cannot be undone.
+        """
+        self.layer1.delete_vault(self.name)
+
+    def upload_archive(self, filename, description=None):
+        """
+        Adds an archive to a vault. For archives greater than 100MB the
+        multipart upload will be used.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+        :type description: str
+        :param description: An optional description for the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive
+        """
+        if os.path.getsize(filename) > self.SingleOperationThreshold:
+            return self.create_archive_from_file(filename, description=description)
+        return self._upload_archive_single_operation(filename, description)
+
+    def _upload_archive_single_operation(self, filename, description):
+        """
+        Adds an archive to a vault in a single operation. Recommended for
+        archives smaller than 100MB.
+
+        :type filename: str
+        :param filename: A filename to upload
+
+        :type description: str
+        :param description: A description for the archive.
+
+        :rtype: str
+        :return: The archive id of the newly created archive
+        """
+        with open(filename, 'rb') as fileobj:
+            linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
+            fileobj.seek(0)
+            response = self.layer1.upload_archive(self.name, fileobj,
+                                                  linear_hash, tree_hash,
+                                                  description)
+        return response['ArchiveId']
+
+    def create_archive_writer(self, part_size=DefaultPartSize,
+                              description=None):
+        """
+        Create a new archive and begin a multi-part upload to it.
+        Returns a file-like object to which the data for the archive
+        can be written. Once all the data is written the file-like
+        object should be closed; you can then call the get_archive_id
+        method on it to get the ID of the created archive.
+
+        :type part_size: int
+        :param part_size: The part size for the multipart upload.
+
+        :type description: str
+        :param description: An optional description for the archive.
+
+        :rtype: :class:`boto.glacier.writer.Writer`
+        :return: A Writer object to which the archive data
+            should be written.
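+
+        Example (an illustrative sketch; ``vault`` is assumed to be a
+        Vault instance and ``data`` a bytestring to archive)::
+
+            writer = vault.create_archive_writer(description='backup')
+            writer.write(data)
+            writer.close()
+            archive_id = writer.get_archive_id()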
+ """ + response = self.layer1.initiate_multipart_upload(self.name, + part_size, + description) + return Writer(self, response['UploadId'], part_size=part_size) + + def create_archive_from_file(self, filename=None, file_obj=None, + description=None, upload_id_callback=None): + """ + Create a new archive and upload the data from the given file + or file-like object. + + :type filename: str + :param filename: A filename to upload + + :type file_obj: file + :param file_obj: A file-like object to upload + + :type description: str + :param description: An optional description for the archive. + + :type upload_id_callback: function + :param upload_id_callback: if set, call with the upload_id as the + only parameter when it becomes known, to enable future calls + to resume_archive_from_file in case resume is needed. + + :rtype: str + :return: The archive id of the newly created archive + """ + part_size = self.DefaultPartSize + if not file_obj: + file_size = os.path.getsize(filename) + try: + part_size = minimum_part_size(file_size, part_size) + except ValueError: + raise UploadArchiveError("File size of %s bytes exceeds " + "40,000 GB archive limit of Glacier.") + file_obj = open(filename, "rb") + writer = self.create_archive_writer( + description=description, + part_size=part_size) + if upload_id_callback: + upload_id_callback(writer.upload_id) + while True: + data = file_obj.read(part_size) + if not data: + break + writer.write(data) + writer.close() + return writer.get_archive_id() + + @staticmethod + def _range_string_to_part_index(range_string, part_size): + start, inside_end = [int(value) for value in range_string.split('-')] + end = inside_end + 1 + length = end - start + if length == part_size + 1: + # Off-by-one bug in Amazon's Glacier implementation, + # see: https://forums.aws.amazon.com/thread.jspa?threadID=106866 + # Workaround: since part_size is too big by one byte, adjust it + end -= 1 + inside_end -= 1 + length -= 1 + assert not (start % part_size), ( + "upload part start byte is not on a part boundary") + assert (length <= part_size), "upload part is bigger than part size" + return start // part_size + + def resume_archive_from_file(self, upload_id, filename=None, + file_obj=None): + """Resume upload of a file already part-uploaded to Glacier. + + The resumption of an upload where the part-uploaded section is empty + is a valid degenerate case that this function can handle. + + One and only one of filename or file_obj must be specified. + + :type upload_id: str + :param upload_id: existing Glacier upload id of upload being resumed. + + :type filename: str + :param filename: file to open for resume + + :type fobj: file + :param fobj: file-like object containing local data to resume. This + must read from the start of the entire upload, not just from the + point being resumed. Use fobj.seek(0) to achieve this if necessary. 
+ + :rtype: str + :return: The archive id of the newly created archive + + """ + part_list_response = self.list_all_parts(upload_id) + part_size = part_list_response['PartSizeInBytes'] + + part_hash_map = {} + for part_desc in part_list_response['Parts']: + part_index = self._range_string_to_part_index( + part_desc['RangeInBytes'], part_size) + part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec') + part_hash_map[part_index] = part_tree_hash + + if not file_obj: + file_obj = open(filename, "rb") + + return resume_file_upload( + self, upload_id, part_size, file_obj, part_hash_map) + + def concurrent_create_archive_from_file(self, filename, description, + **kwargs): + """ + Create a new archive from a file and upload the given + file. + + This is a convenience method around the + :class:`boto.glacier.concurrent.ConcurrentUploader` + class. This method will perform a multipart upload + and upload the parts of the file concurrently. + + :type filename: str + :param filename: A filename to upload + + :param kwargs: Additional kwargs to pass through to + :py:class:`boto.glacier.concurrent.ConcurrentUploader`. + You can pass any argument besides the ``api`` and + ``vault_name`` param (these arguments are already + passed to the ``ConcurrentUploader`` for you). + + :raises: `boto.glacier.exception.UploadArchiveError` is an error + occurs during the upload process. + + :rtype: str + :return: The archive id of the newly created archive + + """ + uploader = ConcurrentUploader(self.layer1, self.name, **kwargs) + archive_id = uploader.upload(filename, description) + return archive_id + + def retrieve_archive(self, archive_id, sns_topic=None, + description=None): + """ + Initiate a archive retrieval job to download the data from an + archive. You will need to wait for the notification from + Amazon (via SNS) before you can actually download the data, + this takes around 4 hours. + + :type archive_id: str + :param archive_id: The id of the archive + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the retrieval job. + """ + job_data = {'Type': 'archive-retrieval', + 'ArchiveId': archive_id} + if sns_topic is not None: + job_data['SNSTopic'] = sns_topic + if description is not None: + job_data['Description'] = description + + response = self.layer1.initiate_job(self.name, job_data) + return self.get_job(response['JobId']) + + def retrieve_inventory(self, sns_topic=None, + description=None, byte_range=None, + start_date=None, end_date=None, + limit=None): + """ + Initiate a inventory retrieval job to list the items in the + vault. You will need to wait for the notification from + Amazon (via SNS) before you can actually download the data, + this takes around 4 hours. + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. 
+ + :type limit: int + :param limit: Limits the number of results returned. + + :rtype: str + :return: The ID of the job + """ + job_data = {'Type': 'inventory-retrieval'} + if sns_topic is not None: + job_data['SNSTopic'] = sns_topic + if description is not None: + job_data['Description'] = description + if byte_range is not None: + job_data['RetrievalByteRange'] = byte_range + if start_date is not None or end_date is not None or limit is not None: + rparams = {} + + if start_date is not None: + rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if end_date is not None: + rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z') + if limit is not None: + rparams['Limit'] = limit + + job_data['InventoryRetrievalParameters'] = rparams + + response = self.layer1.initiate_job(self.name, job_data) + return response['JobId'] + + def retrieve_inventory_job(self, **kwargs): + """ + Identical to ``retrieve_inventory``, but returns a ``Job`` instance + instead of just the job ID. + + :type description: str + :param description: An optional description for the job. + + :type sns_topic: str + :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier + sends notification when the job is completed and the output + is ready for you to download. + + :type byte_range: str + :param byte_range: Range of bytes to retrieve. + + :type start_date: DateTime + :param start_date: Beginning of the date range to query. + + :type end_date: DateTime + :param end_date: End of the date range to query. + + :type limit: int + :param limit: Limits the number of results returned. + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the retrieval job. + """ + job_id = self.retrieve_inventory(**kwargs) + return self.get_job(job_id) + + def delete_archive(self, archive_id): + """ + This operation deletes an archive from the vault. + + :type archive_id: str + :param archive_id: The ID for the archive to be deleted. + """ + return self.layer1.delete_archive(self.name, archive_id) + + def get_job(self, job_id): + """ + Get an object representing a job in progress. + + :type job_id: str + :param job_id: The ID of the job + + :rtype: :class:`boto.glacier.job.Job` + :return: A Job object representing the job. + """ + response_data = self.layer1.describe_job(self.name, job_id) + return Job(self, response_data) + + def list_jobs(self, completed=None, status_code=None): + """ + Return a list of Job objects related to this vault. + + :type completed: boolean + :param completed: Specifies the state of the jobs to return. + If a value of True is passed, only completed jobs will + be returned. If a value of False is passed, only + uncompleted jobs will be returned. If no value is + passed, all jobs will be returned. + + :type status_code: string + :param status_code: Specifies the type of job status to return. + Valid values are: InProgress|Succeeded|Failed. If not + specified, jobs with all status codes are returned. + + :rtype: list of :class:`boto.glacier.job.Job` + :return: A list of Job objects related to this vault. + """ + response_data = self.layer1.list_jobs(self.name, completed, + status_code) + return [Job(self, jd) for jd in response_data['JobList']] + + def list_all_parts(self, upload_id): + """Automatically make and combine multiple calls to list_parts. + + Call list_parts as necessary, combining the results in case multiple + calls were required to get data on all available parts. 
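+
+        For example (a sketch mirroring how resume_archive_from_file
+        consumes the combined result)::
+
+            parts = vault.list_all_parts(upload_id)
+            part_size = parts['PartSizeInBytes']
+            for part in parts['Parts']:
+                print(part['RangeInBytes'], part['SHA256TreeHash'])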
+ + """ + result = self.layer1.list_parts(self.name, upload_id) + marker = result['Marker'] + while marker: + additional_result = self.layer1.list_parts( + self.name, upload_id, marker=marker) + result['Parts'].extend(additional_result['Parts']) + marker = additional_result['Marker'] + # The marker makes no sense in an unpaginated result, and clearing it + # makes testing easier. This also has the nice property that the result + # is a normal (but expanded) response. + result['Marker'] = None + return result diff --git a/ext/boto/glacier/writer.py b/ext/boto/glacier/writer.py new file mode 100644 index 0000000000..fa3161ab3c --- /dev/null +++ b/ext/boto/glacier/writer.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# Copyright (c) 2012 Robie Basak +# Tree hash implementation from Aaron Brady bradya@gmail.com +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import hashlib + +from boto.glacier.utils import chunk_hashes, tree_hash, bytes_to_hex +# This import is provided for backwards compatibility. This function is +# now in boto.glacier.utils, but any existing code can still import +# this directly from this module. +from boto.glacier.utils import compute_hashes_from_fileobj + + +_ONE_MEGABYTE = 1024 * 1024 + + +class _Partitioner(object): + """Convert variable-size writes into part-sized writes + + Call write(data) with variable sized data as needed to write all data. Call + flush() after all data is written. + + This instance will call send_fn(part_data) as needed in part_size pieces, + except for the final part which may be shorter than part_size. Make sure to + call flush() to ensure that a short final part results in a final send_fn + call. 
+ + """ + def __init__(self, part_size, send_fn): + self.part_size = part_size + self.send_fn = send_fn + self._buffer = [] + self._buffer_size = 0 + + def write(self, data): + if data == b'': + return + self._buffer.append(data) + self._buffer_size += len(data) + while self._buffer_size > self.part_size: + self._send_part() + + def _send_part(self): + data = b''.join(self._buffer) + # Put back any data remaining over the part size into the + # buffer + if len(data) > self.part_size: + self._buffer = [data[self.part_size:]] + self._buffer_size = len(self._buffer[0]) + else: + self._buffer = [] + self._buffer_size = 0 + # The part we will send + part = data[:self.part_size] + self.send_fn(part) + + def flush(self): + if self._buffer_size > 0: + self._send_part() + + +class _Uploader(object): + """Upload to a Glacier upload_id. + + Call upload_part for each part (in any order) and then close to complete + the upload. + + """ + def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE): + self.vault = vault + self.upload_id = upload_id + self.part_size = part_size + self.chunk_size = chunk_size + self.archive_id = None + + self._uploaded_size = 0 + self._tree_hashes = [] + + self.closed = False + + def _insert_tree_hash(self, index, raw_tree_hash): + list_length = len(self._tree_hashes) + if index >= list_length: + self._tree_hashes.extend([None] * (list_length - index + 1)) + self._tree_hashes[index] = raw_tree_hash + + def upload_part(self, part_index, part_data): + """Upload a part to Glacier. + + :param part_index: part number where 0 is the first part + :param part_data: data to upload corresponding to this part + + """ + if self.closed: + raise ValueError("I/O operation on closed file") + # Create a request and sign it + part_tree_hash = tree_hash(chunk_hashes(part_data, self.chunk_size)) + self._insert_tree_hash(part_index, part_tree_hash) + + hex_tree_hash = bytes_to_hex(part_tree_hash) + linear_hash = hashlib.sha256(part_data).hexdigest() + start = self.part_size * part_index + content_range = (start, + (start + len(part_data)) - 1) + response = self.vault.layer1.upload_part(self.vault.name, + self.upload_id, + linear_hash, + hex_tree_hash, + content_range, part_data) + response.read() + self._uploaded_size += len(part_data) + + def skip_part(self, part_index, part_tree_hash, part_length): + """Skip uploading of a part. + + The final close call needs to calculate the tree hash and total size + of all uploaded data, so this is the mechanism for resume + functionality to provide it without actually uploading the data again. 
+ + :param part_index: part number where 0 is the first part + :param part_tree_hash: binary tree_hash of part being skipped + :param part_length: length of part being skipped + + """ + if self.closed: + raise ValueError("I/O operation on closed file") + self._insert_tree_hash(part_index, part_tree_hash) + self._uploaded_size += part_length + + def close(self): + if self.closed: + return + if None in self._tree_hashes: + raise RuntimeError("Some parts were not uploaded.") + # Complete the multiplart glacier upload + hex_tree_hash = bytes_to_hex(tree_hash(self._tree_hashes)) + response = self.vault.layer1.complete_multipart_upload( + self.vault.name, self.upload_id, hex_tree_hash, + self._uploaded_size) + self.archive_id = response['ArchiveId'] + self.closed = True + + +def generate_parts_from_fobj(fobj, part_size): + data = fobj.read(part_size) + while data: + yield data.encode('utf-8') + data = fobj.read(part_size) + + +def resume_file_upload(vault, upload_id, part_size, fobj, part_hash_map, + chunk_size=_ONE_MEGABYTE): + """Resume upload of a file already part-uploaded to Glacier. + + The resumption of an upload where the part-uploaded section is empty is a + valid degenerate case that this function can handle. In this case, + part_hash_map should be an empty dict. + + :param vault: boto.glacier.vault.Vault object. + :param upload_id: existing Glacier upload id of upload being resumed. + :param part_size: part size of existing upload. + :param fobj: file object containing local data to resume. This must read + from the start of the entire upload, not just from the point being + resumed. Use fobj.seek(0) to achieve this if necessary. + :param part_hash_map: {part_index: part_tree_hash, ...} of data already + uploaded. Each supplied part_tree_hash will be verified and the part + re-uploaded if there is a mismatch. + :param chunk_size: chunk size of tree hash calculation. This must be + 1 MiB for Amazon. + + """ + uploader = _Uploader(vault, upload_id, part_size, chunk_size) + for part_index, part_data in enumerate( + generate_parts_from_fobj(fobj, part_size)): + part_tree_hash = tree_hash(chunk_hashes(part_data, chunk_size)) + if (part_index not in part_hash_map or + part_hash_map[part_index] != part_tree_hash): + uploader.upload_part(part_index, part_data) + else: + uploader.skip_part(part_index, part_tree_hash, len(part_data)) + uploader.close() + return uploader.archive_id + + +class Writer(object): + """ + Presents a file-like object for writing to a Amazon Glacier + Archive. The data is written using the multi-part upload API. + """ + def __init__(self, vault, upload_id, part_size, chunk_size=_ONE_MEGABYTE): + self.uploader = _Uploader(vault, upload_id, part_size, chunk_size) + self.partitioner = _Partitioner(part_size, self._upload_part) + self.closed = False + self.next_part_index = 0 + + def write(self, data): + if self.closed: + raise ValueError("I/O operation on closed file") + self.partitioner.write(data) + + def _upload_part(self, part_data): + self.uploader.upload_part(self.next_part_index, part_data) + self.next_part_index += 1 + + def close(self): + if self.closed: + return + self.partitioner.flush() + self.uploader.close() + self.closed = True + + def get_archive_id(self): + self.close() + return self.uploader.archive_id + + @property + def current_tree_hash(self): + """ + Returns the current tree hash for the data that's been written + **so far**. + + Only once the writing is complete is the final tree hash returned. 
+ """ + return tree_hash(self.uploader._tree_hashes) + + @property + def current_uploaded_size(self): + """ + Returns the current uploaded size for the data that's been written + **so far**. + + Only once the writing is complete is the final uploaded size returned. + """ + return self.uploader._uploaded_size + + @property + def upload_id(self): + return self.uploader.upload_id + + @property + def vault(self): + return self.uploader.vault diff --git a/ext/boto/gs/__init__.py b/ext/boto/gs/__init__.py new file mode 100644 index 0000000000..bf4c0b942b --- /dev/null +++ b/ext/boto/gs/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/ext/boto/gs/acl.py b/ext/boto/gs/acl.py new file mode 100644 index 0000000000..57bdce1cbc --- /dev/null +++ b/ext/boto/gs/acl.py @@ -0,0 +1,308 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.gs.user import User +from boto.exception import InvalidAclError + +ACCESS_CONTROL_LIST = 'AccessControlList' +ALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers' +ALL_USERS = 'AllUsers' +DISPLAY_NAME = 'DisplayName' +DOMAIN = 'Domain' +EMAIL_ADDRESS = 'EmailAddress' +ENTRY = 'Entry' +ENTRIES = 'Entries' +GROUP_BY_DOMAIN = 'GroupByDomain' +GROUP_BY_EMAIL = 'GroupByEmail' +GROUP_BY_ID = 'GroupById' +ID = 'ID' +NAME = 'Name' +OWNER = 'Owner' +PERMISSION = 'Permission' +SCOPE = 'Scope' +TYPE = 'type' +USER_BY_EMAIL = 'UserByEmail' +USER_BY_ID = 'UserById' + + +CannedACLStrings = ['private', 'public-read', 'project-private', + 'public-read-write', 'authenticated-read', + 'bucket-owner-read', 'bucket-owner-full-control'] +"""A list of Google Cloud Storage predefined (canned) ACL strings.""" + +SupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL'] +"""A list of supported ACL permissions.""" + + +class ACL(object): + + def __init__(self, parent=None): + self.parent = parent + self.entries = Entries(self) + + @property + def acl(self): + return self + + def __repr__(self): + # Owner is optional in GS ACLs. + if hasattr(self, 'owner'): + entries_repr = ['Owner:%s' % self.owner.__repr__()] + else: + entries_repr = [''] + acl_entries = self.entries + if acl_entries: + for e in acl_entries.entry_list: + entries_repr.append(e.__repr__()) + return '<%s>' % ', '.join(entries_repr) + + # Method with same signature as boto.s3.acl.ACL.add_email_grant(), to allow + # polymorphic treatment at application layer. + def add_email_grant(self, permission, email_address): + entry = Entry(type=USER_BY_EMAIL, email_address=email_address, + permission=permission) + self.entries.entry_list.append(entry) + + # Method with same signature as boto.s3.acl.ACL.add_user_grant(), to allow + # polymorphic treatment at application layer. + def add_user_grant(self, permission, user_id): + entry = Entry(permission=permission, type=USER_BY_ID, id=user_id) + self.entries.entry_list.append(entry) + + def add_group_email_grant(self, permission, email_address): + entry = Entry(type=GROUP_BY_EMAIL, email_address=email_address, + permission=permission) + self.entries.entry_list.append(entry) + + def add_group_grant(self, permission, group_id): + entry = Entry(type=GROUP_BY_ID, id=group_id, permission=permission) + self.entries.entry_list.append(entry) + + def startElement(self, name, attrs, connection): + if name.lower() == OWNER.lower(): + self.owner = User(self) + return self.owner + elif name.lower() == ENTRIES.lower(): + self.entries = Entries(self) + return self.entries + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == OWNER.lower(): + pass + elif name.lower() == ENTRIES.lower(): + pass + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s>' % ACCESS_CONTROL_LIST + # Owner is optional in GS ACLs. + if hasattr(self, 'owner'): + s += self.owner.to_xml() + acl_entries = self.entries + if acl_entries: + s += acl_entries.to_xml() + s += '' % ACCESS_CONTROL_LIST + return s + + +class Entries(object): + + def __init__(self, parent=None): + self.parent = parent + # Entries is the class that represents the same-named XML + # element. entry_list is the list within this class that holds the data. 
+ self.entry_list = [] + + def __repr__(self): + entries_repr = [] + for e in self.entry_list: + entries_repr.append(e.__repr__()) + return '' % ', '.join(entries_repr) + + def startElement(self, name, attrs, connection): + if name.lower() == ENTRY.lower(): + entry = Entry(self) + self.entry_list.append(entry) + return entry + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == ENTRY.lower(): + pass + else: + setattr(self, name, value) + + def to_xml(self): + if not self.entry_list: + return '' + s = '<%s>' % ENTRIES + for entry in self.entry_list: + s += entry.to_xml() + s += '' % ENTRIES + return s + + +# Class that represents a single (Scope, Permission) entry in an ACL. +class Entry(object): + + def __init__(self, scope=None, type=None, id=None, name=None, + email_address=None, domain=None, permission=None): + if not scope: + scope = Scope(self, type, id, name, email_address, domain) + self.scope = scope + self.permission = permission + + def __repr__(self): + return '<%s: %s>' % (self.scope.__repr__(), self.permission.__repr__()) + + def startElement(self, name, attrs, connection): + if name.lower() == SCOPE.lower(): + # The following if statement used to look like this: + # if not TYPE in attrs: + # which caused problems because older versions of the + # AttributesImpl class in the xml.sax library neglected to include + # a __contains__() method (which Python calls to implement the + # 'in' operator). So when you use the in operator, like the if + # statement above, Python invokes the __getiter__() method with + # index 0, which raises an exception. More recent versions of + # xml.sax include the __contains__() method, rendering the in + # operator functional. The work-around here is to formulate the + # if statement as below, which is the legal way to query + # AttributesImpl for containment (and is also how the added + # __contains__() method works). At one time gsutil disallowed + # xmlplus-based parsers, until this more specific problem was + # determined. + if TYPE not in attrs: + raise InvalidAclError('Missing "%s" in "%s" part of ACL' % + (TYPE, SCOPE)) + self.scope = Scope(self, attrs[TYPE]) + return self.scope + elif name.lower() == PERMISSION.lower(): + pass + else: + return None + + def endElement(self, name, value, connection): + if name.lower() == SCOPE.lower(): + pass + elif name.lower() == PERMISSION.lower(): + value = value.strip() + if not value in SupportedPermissions: + raise InvalidAclError('Invalid Permission "%s"' % value) + self.permission = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s>' % ENTRY + s += self.scope.to_xml() + s += '<%s>%s' % (PERMISSION, self.permission, PERMISSION) + s += '' % ENTRY + return s + + +class Scope(object): + + # Map from Scope type.lower() to lower-cased list of allowed sub-elems. 
+ ALLOWED_SCOPE_TYPE_SUB_ELEMS = { + ALL_AUTHENTICATED_USERS.lower() : [], + ALL_USERS.lower() : [], + GROUP_BY_DOMAIN.lower() : [DOMAIN.lower()], + GROUP_BY_EMAIL.lower() : [ + DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], + GROUP_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()], + USER_BY_EMAIL.lower() : [ + DISPLAY_NAME.lower(), EMAIL_ADDRESS.lower(), NAME.lower()], + USER_BY_ID.lower() : [DISPLAY_NAME.lower(), ID.lower(), NAME.lower()] + } + + def __init__(self, parent, type=None, id=None, name=None, + email_address=None, domain=None): + self.parent = parent + self.type = type + self.name = name + self.id = id + self.domain = domain + self.email_address = email_address + if self.type.lower() not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS: + raise InvalidAclError('Invalid %s %s "%s" ' % + (SCOPE, TYPE, self.type)) + + def __repr__(self): + named_entity = None + if self.id: + named_entity = self.id + elif self.email_address: + named_entity = self.email_address + elif self.domain: + named_entity = self.domain + if named_entity: + return '<%s: %s>' % (self.type, named_entity) + else: + return '<%s>' % self.type + + def startElement(self, name, attrs, connection): + if (not name.lower() in + self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type.lower()]): + raise InvalidAclError('Element "%s" not allowed in %s %s "%s" ' % + (name, SCOPE, TYPE, self.type)) + return None + + def endElement(self, name, value, connection): + value = value.strip() + if name.lower() == DOMAIN.lower(): + self.domain = value + elif name.lower() == EMAIL_ADDRESS.lower(): + self.email_address = value + elif name.lower() == ID.lower(): + self.id = value + elif name.lower() == NAME.lower(): + self.name = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '<%s type="%s">' % (SCOPE, self.type) + if (self.type.lower() == ALL_AUTHENTICATED_USERS.lower() + or self.type.lower() == ALL_USERS.lower()): + pass + elif self.type.lower() == GROUP_BY_DOMAIN.lower(): + s += '<%s>%s' % (DOMAIN, self.domain, DOMAIN) + elif (self.type.lower() == GROUP_BY_EMAIL.lower() + or self.type.lower() == USER_BY_EMAIL.lower()): + s += '<%s>%s' % (EMAIL_ADDRESS, self.email_address, + EMAIL_ADDRESS) + if self.name: + s += '<%s>%s' % (NAME, self.name, NAME) + elif (self.type.lower() == GROUP_BY_ID.lower() + or self.type.lower() == USER_BY_ID.lower()): + s += '<%s>%s' % (ID, self.id, ID) + if self.name: + s += '<%s>%s' % (NAME, self.name, NAME) + else: + raise InvalidAclError('Invalid scope type "%s" ', self.type) + + s += '' % SCOPE + return s diff --git a/ext/boto/gs/bucket.py b/ext/boto/gs/bucket.py new file mode 100644 index 0000000000..67c601e662 --- /dev/null +++ b/ext/boto/gs/bucket.py @@ -0,0 +1,1001 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import re +import urllib +import xml.sax + +import boto +from boto import handler +from boto.resultset import ResultSet +from boto.exception import GSResponseError +from boto.exception import InvalidAclError +from boto.gs.acl import ACL, CannedACLStrings +from boto.gs.acl import SupportedPermissions as GSPermissions +from boto.gs.bucketlistresultset import VersionedBucketListResultSet +from boto.gs.cors import Cors +from boto.gs.lifecycle import LifecycleConfig +from boto.gs.key import Key as GSKey +from boto.s3.acl import Policy +from boto.s3.bucket import Bucket as S3Bucket +from boto.utils import get_utf8_value +from boto.compat import six + +# constants for http query args +DEF_OBJ_ACL = 'defaultObjectAcl' +STANDARD_ACL = 'acl' +CORS_ARG = 'cors' +LIFECYCLE_ARG = 'lifecycle' +STORAGE_CLASS_ARG='storageClass' +ERROR_DETAILS_REGEX = re.compile(r'
<Details>(?P<details>.*)</Details>
    ') + +class Bucket(S3Bucket): + """Represents a Google Cloud Storage bucket.""" + + StorageClassBody = ('\n' + '%s') + VersioningBody = ('\n' + '%s' + '') + WebsiteBody = ('\n' + '%s%s') + WebsiteMainPageFragment = '%s' + WebsiteErrorFragment = '%s' + + def __init__(self, connection=None, name=None, key_class=GSKey): + super(Bucket, self).__init__(connection, name, key_class) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'CreationDate': + self.creation_date = value + else: + setattr(self, name, value) + + def get_key(self, key_name, headers=None, version_id=None, + response_headers=None, generation=None): + """Returns a Key instance for an object in this bucket. + + Note that this method uses a HEAD request to check for the existence of + the key. + + :type key_name: string + :param key_name: The name of the key to retrieve + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/06N3b for details. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: A specific generation number to fetch the key at. If + not specified, the latest generation is fetched. + + :rtype: :class:`boto.gs.key.Key` + :returns: A Key object from this bucket. + """ + query_args_l = [] + if generation: + query_args_l.append('generation=%s' % generation) + if response_headers: + for rk, rv in six.iteritems(response_headers): + query_args_l.append('%s=%s' % (rk, urllib.quote(rv))) + try: + key, resp = self._get_key_internal(key_name, headers, + query_args_l=query_args_l) + except GSResponseError as e: + if e.status == 403 and 'Forbidden' in e.reason: + # If we failed getting an object, let the user know which object + # failed rather than just returning a generic 403. + e.reason = ("Access denied to 'gs://%s/%s'." % + (self.name, key_name)) + raise + return key + + def copy_key(self, new_key_name, src_bucket_name, src_key_name, + metadata=None, src_version_id=None, storage_class='STANDARD', + preserve_acl=False, encrypt_key=False, headers=None, + query_args=None, src_generation=None): + """Create a new key in the bucket by copying an existing key. + + :type new_key_name: string + :param new_key_name: The name of the new key + + :type src_bucket_name: string + :param src_bucket_name: The name of the source bucket + + :type src_key_name: string + :param src_key_name: The name of the source key + + :type src_generation: int + :param src_generation: The generation number of the source key to copy. + If not specified, the latest generation is copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type storage_class: string + :param storage_class: The storage class of the new key. By + default, the new key will use the standard storage class. + Possible values are: STANDARD | DURABLE_REDUCED_AVAILABILITY + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. 
Note that + preserving the ACL in the new key object will require two + additional API calls to GCS, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL (or if you have a default ACL set + on the bucket), a value of False will be significantly more + efficient. + + :type encrypt_key: bool + :param encrypt_key: Included for compatibility with S3. This argument is + ignored. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :type query_args: string + :param query_args: A string of additional querystring arguments + to append to the request + + :rtype: :class:`boto.gs.key.Key` + :returns: An instance of the newly created key object + """ + if src_generation: + headers = headers or {} + headers['x-goog-copy-source-generation'] = str(src_generation) + return super(Bucket, self).copy_key( + new_key_name, src_bucket_name, src_key_name, metadata=metadata, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args) + + def list_versions(self, prefix='', delimiter='', marker='', + generation_marker='', headers=None): + """ + List versioned objects within a bucket. This returns an + instance of an VersionedBucketListResultSet that automatically + handles all of the result paging, etc. from GCS. You just need + to keep iterating until there are no more results. Called + with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + https://developers.google.com/storage/docs/reference-headers#delimiter + for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type generation_marker: string + :param generation_marker: The "generation marker" of where you are in + the result set. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :rtype: + :class:`boto.gs.bucketlistresultset.VersionedBucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc. + """ + return VersionedBucketListResultSet(self, prefix, delimiter, + marker, generation_marker, + headers) + + def validate_get_all_versions_params(self, params): + """ + See documentation in boto/s3/bucket.py. + """ + self.validate_kwarg_names(params, + ['version_id_marker', 'delimiter', 'marker', + 'generation_marker', 'prefix', 'max_keys']) + + def delete_key(self, key_name, headers=None, version_id=None, + mfa_token=None, generation=None): + """ + Deletes a key from the bucket. + + :type key_name: string + :param key_name: The key name to delete + + :type headers: dict + :param headers: A dictionary of header name/value pairs. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type mfa_token: tuple or list of strings + :param mfa_token: Unused in this subclass. + + :type generation: int + :param generation: The generation number of the key to delete. If not + specified, the latest generation number will be deleted. 
+ + :rtype: :class:`boto.gs.key.Key` + :returns: A key object holding information on what was + deleted. + """ + query_args_l = [] + if generation: + query_args_l.append('generation=%s' % generation) + self._delete_key_internal(key_name, headers=headers, + version_id=version_id, mfa_token=mfa_token, + query_args_l=query_args_l) + + def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None, + generation=None, if_generation=None, if_metageneration=None): + """Sets or changes a bucket's or key's ACL. + + :type acl_or_str: string or :class:`boto.gs.acl.ACL` + :param acl_or_str: A canned ACL string (see + :data:`~.gs.acl.CannedACLStrings`) or an ACL object. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if isinstance(acl_or_str, Policy): + raise InvalidAclError('Attempt to set S3 Policy on GS ACL') + elif isinstance(acl_or_str, ACL): + self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + else: + self.set_canned_acl(acl_or_str, key_name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def set_def_acl(self, acl_or_str, headers=None): + """Sets or changes a bucket's default ACL. + + :type acl_or_str: string or :class:`boto.gs.acl.ACL` + :param acl_or_str: A canned ACL string (see + :data:`~.gs.acl.CannedACLStrings`) or an ACL object. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + if isinstance(acl_or_str, Policy): + raise InvalidAclError('Attempt to set S3 Policy on GS ACL') + elif isinstance(acl_or_str, ACL): + self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers) + else: + self.set_def_canned_acl(acl_or_str, headers=headers) + + def _get_xml_acl_helper(self, key_name, headers, query_args): + """Provides common functionality for get_xml_acl and _get_acl_helper.""" + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + if response.status == 403: + match = ERROR_DETAILS_REGEX.search(body) + details = match.group('details') if match else None + if details: + details = (('
<Details>%s. Note that Full Control access'
+                                ' is required to access ACLs.</Details>
    ') % + details) + body = re.sub(ERROR_DETAILS_REGEX, details, body) + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def _get_acl_helper(self, key_name, headers, query_args): + """Provides common functionality for get_acl and get_def_acl.""" + body = self._get_xml_acl_helper(key_name, headers, query_args) + acl = ACL(self) + h = handler.XmlHandler(acl, self) + xml.sax.parseString(body, h) + return acl + + def get_acl(self, key_name='', headers=None, version_id=None, + generation=None): + """Returns the ACL of the bucket or an object in the bucket. + + :param str key_name: The name of the object to get the ACL for. If not + specified, the ACL for the bucket will be returned. + + :param dict headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. This parameter is only valid when retrieving + the ACL of an object, not a bucket. + + :rtype: :class:`.gs.acl.ACL` + """ + query_args = STANDARD_ACL + if generation: + query_args += '&generation=%s' % generation + return self._get_acl_helper(key_name, headers, query_args) + + def get_xml_acl(self, key_name='', headers=None, version_id=None, + generation=None): + """Returns the ACL string of the bucket or an object in the bucket. + + :param str key_name: The name of the object to get the ACL for. If not + specified, the ACL for the bucket will be returned. + + :param dict headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. This parameter is only valid when retrieving + the ACL of an object, not a bucket. + + :rtype: str + """ + query_args = STANDARD_ACL + if generation: + query_args += '&generation=%s' % generation + return self._get_xml_acl_helper(key_name, headers, query_args) + + def get_def_acl(self, headers=None): + """Returns the bucket's default ACL. + + :param dict headers: Additional headers to set during the request. + + :rtype: :class:`.gs.acl.ACL` + """ + return self._get_acl_helper('', headers, DEF_OBJ_ACL) + + def _set_acl_helper(self, acl_or_str, key_name, headers, query_args, + generation, if_generation, if_metageneration, + canned=False): + """Provides common functionality for set_acl, set_xml_acl, + set_canned_acl, set_def_acl, set_def_xml_acl, and + set_def_canned_acl().""" + + headers = headers or {} + data = '' + if canned: + headers[self.connection.provider.acl_header] = acl_or_str + else: + data = acl_or_str + + if generation: + query_args += '&generation=%s' % generation + + if if_metageneration is not None and if_generation is None: + raise ValueError("Received if_metageneration argument with no " + "if_generation argument. 
A metageneration has no " + "meaning without a content generation.") + if not key_name and (if_generation or if_metageneration): + raise ValueError("Received if_generation or if_metageneration " + "parameter while setting the ACL of a bucket.") + if if_generation is not None: + headers['x-goog-if-generation-match'] = str(if_generation) + if if_metageneration is not None: + headers['x-goog-if-metageneration-match'] = str(if_metageneration) + + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), get_utf8_value(key_name), + data=get_utf8_value(data), headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, + query_args='acl', generation=None, if_generation=None, + if_metageneration=None): + """Sets a bucket's or objects's ACL to an XML string. + + :type acl_str: string + :param acl_str: A string containing the ACL XML. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type query_args: str + :param query_args: The query parameters to pass with the request. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + return self._set_acl_helper(acl_str, key_name=key_name, headers=headers, + query_args=query_args, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None, generation=None, if_generation=None, + if_metageneration=None): + """Sets a bucket's or objects's ACL using a predefined (canned) value. + + :type acl_str: string + :param acl_str: A canned ACL string. See + :data:`~.gs.acl.CannedACLStrings`. + + :type key_name: string + :param key_name: A key name within the bucket to set the ACL for. If not + specified, the ACL for the bucket will be set. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type version_id: string + :param version_id: Unused in this subclass. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if acl_str not in CannedACLStrings: + raise ValueError("Provided canned ACL string (%s) is not valid." 
+ % acl_str) + query_args = STANDARD_ACL + return self._set_acl_helper(acl_str, key_name, headers, query_args, + generation, if_generation, + if_metageneration, canned=True) + + def set_def_canned_acl(self, acl_str, headers=None): + """Sets a bucket's default ACL using a predefined (canned) value. + + :type acl_str: string + :param acl_str: A canned ACL string. See + :data:`~.gs.acl.CannedACLStrings`. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + if acl_str not in CannedACLStrings: + raise ValueError("Provided canned ACL string (%s) is not valid." + % acl_str) + query_args = DEF_OBJ_ACL + return self._set_acl_helper(acl_str, '', headers, query_args, + generation=None, if_generation=None, + if_metageneration=None, canned=True) + + def set_def_xml_acl(self, acl_str, headers=None): + """Sets a bucket's default ACL to an XML string. + + :type acl_str: string + :param acl_str: A string containing the ACL XML. + + :type headers: dict + :param headers: Additional headers to set during the request. + """ + return self.set_xml_acl(acl_str, '', headers, + query_args=DEF_OBJ_ACL) + + def get_cors(self, headers=None): + """Returns a bucket's CORS XML document. + + :param dict headers: Additional headers to send with the request. + :rtype: :class:`~.cors.Cors` + """ + response = self.connection.make_request('GET', self.name, + query_args=CORS_ARG, + headers=headers) + body = response.read() + if response.status == 200: + # Success - parse XML and return Cors object. + cors = Cors() + h = handler.XmlHandler(cors, self) + xml.sax.parseString(body, h) + return cors + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_cors(self, cors, headers=None): + """Sets a bucket's CORS XML document. + + :param str cors: A string containing the CORS XML. + :param dict headers: Additional headers to send with the request. + """ + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), data=get_utf8_value(cors), + query_args=CORS_ARG, headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_storage_class(self): + """ + Returns the StorageClass for the bucket. + + :rtype: str + :return: The StorageClass for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args=STORAGE_CLASS_ARG) + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs.StorageClass + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_storage_class(self, storage_class, headers=None): + """ + Sets a bucket's storage class. + + :param str storage_class: A string containing the storage class. + :param dict headers: Additional headers to send with the request. + """ + req_body = self.StorageClassBody % (get_utf8_value(storage_class)) + self.set_subresource(STORAGE_CLASS_ARG, req_body, headers=headers) + + # Method with same signature as boto.s3.bucket.Bucket.add_email_grant(), + # to allow polymorphic treatment at application layer. + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. 
This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the GS + account you are granting the permission to. + + :type recursive: bool + :param recursive: A boolean value that controls whether the call + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_email_grant(permission, email_address) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_email_grant(permission, email_address, headers=headers) + + # Method with same signature as boto.s3.bucket.Bucket.add_user_grant(), + # to allow polymorphic treatment at application layer. + def add_user_grant(self, permission, user_id, recursive=False, + headers=None): + """ + Convenience method that provides a quick way to add a canonical user + grant to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUTs the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ|WRITE|FULL_CONTROL) + + :type user_id: string + :param user_id: The canonical user id associated with the GS account + you are granting the permission to. + + :type recursive: bool + :param recursive: A boolean value that controls whether the call + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_user_grant(permission, user_id) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_user_grant(permission, user_id, headers=headers) + + def add_group_email_grant(self, permission, email_address, recursive=False, + headers=None): + """ + Convenience method that provides a quick way to add an email group + grant to a bucket. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUT's the new ACL back to GCS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|WRITE|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type email_address: string + :param email_address: The email address associated with the Google + Group to which you are granting the permission. + + :type recursive: bool + :param recursive: A boolean value that controls whether the call + will apply the grant to all keys within the bucket + or not.
The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in GSPermissions: + raise self.connection.provider.storage_permissions_error( + 'Unknown Permission: %s' % permission) + acl = self.get_acl(headers=headers) + acl.add_group_email_grant(permission, email_address) + self.set_acl(acl, headers=headers) + if recursive: + for key in self: + key.add_group_email_grant(permission, email_address, + headers=headers) + + # Method with same input signature as boto.s3.bucket.Bucket.list_grants() + # (but returning different object type), to allow polymorphic treatment + # at application layer. + def list_grants(self, headers=None): + """Returns the ACL entries applied to this bucket. + + :param dict headers: Additional headers to send with the request. + :rtype: list containing :class:`~.gs.acl.Entry` objects. + """ + acl = self.get_acl(headers=headers) + return acl.entries + + def disable_logging(self, headers=None): + """Disable logging on this bucket. + + :param dict headers: Additional headers to send with the request. + """ + xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>' + self.set_subresource('logging', xml_str, headers=headers) + + def enable_logging(self, target_bucket, target_prefix=None, headers=None): + """Enable logging on a bucket. + + :type target_bucket: bucket or string + :param target_bucket: The bucket to log to. + + :type target_prefix: string + :param target_prefix: The prefix which should be prepended to the + generated log files written to the target_bucket. + + :param dict headers: Additional headers to send with the request. + """ + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>' + xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket) + if target_prefix: + xml_str = (xml_str + + '<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix) + xml_str = xml_str + '</Logging>' + + self.set_subresource('logging', xml_str, headers=headers) + + def get_logging_config_with_xml(self, headers=None): + """Returns the current status of logging configuration on the bucket as + unparsed XML. + + :param dict headers: Additional headers to send with the request. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + + 1) A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * Logging + + * LogObjectPrefix: Prefix that is prepended to log objects. + * LogBucket: Target bucket for log objects. + + 2) Unparsed XML describing the bucket's logging configuration. + """ + response = self.connection.make_request('GET', self.name, + query_args='logging', + headers=headers) + body = response.read() + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def get_logging_config(self, headers=None): + """Returns the current status of logging configuration on the bucket. + + :param dict headers: Additional headers to send with the request. + + :rtype: dict + :returns: A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * Logging + + * LogObjectPrefix: Prefix that is prepended to log objects. + * LogBucket: Target bucket for log objects.
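+ + For example (illustrative values, not from the GCS docs), a bucket + logging to 'example-logs' might be reported as + {'Logging': {'LogBucket': 'example-logs', 'LogObjectPrefix': 'access-'}}.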
+ """ + return self.get_logging_config_with_xml(headers)[0] + + def configure_website(self, main_page_suffix=None, error_key=None, + headers=None): + """Configure this bucket to act as a website + + :type main_page_suffix: str + :param main_page_suffix: Suffix that is appended to a request that is + for a "directory" on the website endpoint (e.g. if the suffix is + index.html and you make a request to samplebucket/images/ the data + that is returned will be for the object with the key name + images/index.html). The suffix must not be empty and must not + include a slash character. This parameter is optional and the + property is disabled if excluded. + + :type error_key: str + :param error_key: The object key name to use when a 400 error occurs. + This parameter is optional and the property is disabled if excluded. + + :param dict headers: Additional headers to send with the request. + """ + if main_page_suffix: + main_page_frag = self.WebsiteMainPageFragment % main_page_suffix + else: + main_page_frag = '' + + if error_key: + error_frag = self.WebsiteErrorFragment % error_key + else: + error_frag = '' + + body = self.WebsiteBody % (main_page_frag, error_frag) + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), data=get_utf8_value(body), + query_args='websiteConfig', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_configuration(self, headers=None): + """Returns the current status of website configuration on the bucket. + + :param dict headers: Additional headers to send with the request. + + :rtype: dict + :returns: A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * WebsiteConfiguration + + * MainPageSuffix: suffix that is appended to request that + is for a "directory" on the website endpoint. + * NotFoundPage: name of an object to serve when site visitors + encounter a 404. + """ + return self.get_website_configuration_with_xml(headers)[0] + + def get_website_configuration_with_xml(self, headers=None): + """Returns the current status of website configuration on the bucket as + unparsed XML. + + :param dict headers: Additional headers to send with the request. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + + 1) A dictionary containing the parsed XML response from GCS. The + overall structure is: + + * WebsiteConfiguration + + * MainPageSuffix: suffix that is appended to request that is for + a "directory" on the website endpoint. + * NotFoundPage: name of an object to serve when site visitors + encounter a 404 + + 2) Unparsed XML describing the bucket's website configuration. + """ + response = self.connection.make_request('GET', self.name, + query_args='websiteConfig', headers=headers) + body = response.read() + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def delete_website_configuration(self, headers=None): + """Remove the website configuration from this bucket. + + :param dict headers: Additional headers to send with the request. + """ + self.configure_website(headers=headers) + + def get_versioning_status(self, headers=None): + """Returns the current status of versioning configuration on the bucket. 
+ + :rtype: bool + """ + response = self.connection.make_request('GET', self.name, + query_args='versioning', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + resp_json = boto.jsonresponse.Element() + boto.jsonresponse.XmlHandler(resp_json, None).parse(body) + resp_json = resp_json['VersioningConfiguration'] + return ('Status' in resp_json) and (resp_json['Status'] == 'Enabled') + + def configure_versioning(self, enabled, headers=None): + """Configure versioning for this bucket. + + :param bool enabled: If set to True, enables versioning on this bucket. + If set to False, disables versioning. + + :param dict headers: Additional headers to send with the request. + """ + if enabled == True: + req_body = self.VersioningBody % ('Enabled') + else: + req_body = self.VersioningBody % ('Suspended') + self.set_subresource('versioning', req_body, headers=headers) + + def get_lifecycle_config(self, headers=None): + """ + Returns the current lifecycle configuration on the bucket. + + :rtype: :class:`boto.gs.lifecycle.LifecycleConfig` + :returns: A LifecycleConfig object that describes all current + lifecycle rules in effect for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args=LIFECYCLE_ARG, headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + lifecycle_config = LifecycleConfig() + h = handler.XmlHandler(lifecycle_config, self) + xml.sax.parseString(body, h) + return lifecycle_config + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_lifecycle(self, lifecycle_config, headers=None): + """ + Configure lifecycle for this bucket. + + :type lifecycle_config: :class:`boto.gs.lifecycle.LifecycleConfig` + :param lifecycle_config: The lifecycle configuration you want + to configure for this bucket. + """ + xml = lifecycle_config.to_xml() + response = self.connection.make_request( + 'PUT', get_utf8_value(self.name), data=get_utf8_value(xml), + query_args=LIFECYCLE_ARG, headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) diff --git a/ext/boto/gs/bucketlistresultset.py b/ext/boto/gs/bucketlistresultset.py new file mode 100644 index 0000000000..db634cfd45 --- /dev/null +++ b/ext/boto/gs/bucketlistresultset.py @@ -0,0 +1,64 @@ +# Copyright 2012 Google Inc. +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT + SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + IN THE SOFTWARE. + + def versioned_bucket_lister(bucket, prefix='', delimiter='', + marker='', generation_marker='', headers=None): + """ + A generator function for listing versioned objects. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_versions(prefix=prefix, marker=marker, + generation_marker=generation_marker, + delimiter=delimiter, headers=headers, + max_keys=999) + for k in rs: + yield k + marker = rs.next_marker + generation_marker = rs.next_generation_marker + more_results = rs.is_truncated + + class VersionedBucketListResultSet(object): + """ + A resultset for listing versions within a bucket. Uses the + versioned_bucket_lister generator function and implements the iterator + interface. This transparently handles the results paging from GCS + so even if you have many thousands of keys within the bucket you can + iterate over all keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', marker='', + generation_marker='', headers=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.marker = marker + self.generation_marker = generation_marker + self.headers = headers + + def __iter__(self): + return versioned_bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, + marker=self.marker, + generation_marker=self.generation_marker, + headers=self.headers) diff --git a/ext/boto/gs/connection.py b/ext/boto/gs/connection.py new file mode 100644 index 0000000000..9a2e4a2bbb --- /dev/null +++ b/ext/boto/gs/connection.py @@ -0,0 +1,129 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
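+ + # A brief usage sketch (illustrative only -- the bucket name and the + # configured credentials are hypothetical, not part of the boto sources): + # + # >>> from boto.gs.connection import GSConnection, Location + # >>> conn = GSConnection() # gs credentials are read from the boto config + # >>> bucket = conn.create_bucket('example-bucket', location=Location.EU) + # >>> bucket = conn.get_bucket('example-bucket')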
+ + from boto.gs.bucket import Bucket + from boto.s3.connection import S3Connection + from boto.s3.connection import SubdomainCallingFormat + from boto.s3.connection import check_lowercase_bucketname + from boto.utils import get_utf8_value + + class Location(object): + DEFAULT = 'US' + EU = 'EU' + + class GSConnection(S3Connection): + + DefaultHost = 'storage.googleapis.com' + QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s' + + def __init__(self, gs_access_key_id=None, gs_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=DefaultHost, debug=0, https_connection_factory=None, + calling_format=SubdomainCallingFormat(), path='/', + suppress_consec_slashes=True): + super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, calling_format, path, + "google", Bucket, + suppress_consec_slashes=suppress_consec_slashes) + + def create_bucket(self, bucket_name, headers=None, + location=Location.DEFAULT, policy=None, + storage_class='STANDARD'): + """ + Creates a new bucket. By default it's located in the USA. You can + pass Location.EU to create a bucket in the EU. You can also pass + a LocationConstraint for where the bucket should be located, and + a StorageClass describing how the data should be stored. + + :type bucket_name: string + :param bucket_name: The name of the new bucket. + + :type headers: dict + :param headers: Additional headers to pass along with the request to GCS. + + :type location: :class:`boto.gs.connection.Location` + :param location: The location of the new bucket. + + :type policy: :class:`boto.gs.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new + bucket in GCS. + + :type storage_class: string + :param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'. + + """ + check_lowercase_bucketname(bucket_name) + + if policy: + if headers: + headers[self.provider.acl_header] = policy + else: + headers = {self.provider.acl_header : policy} + if not location: + location = Location.DEFAULT + location_elem = ('<LocationConstraint>%s</LocationConstraint>' + % location) + if storage_class: + storage_class_elem = ('<StorageClass>%s</StorageClass>' + % storage_class) + else: + storage_class_elem = '' + data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>' + % (location_elem, storage_class_elem)) + response = self.make_request( + 'PUT', get_utf8_value(bucket_name), headers=headers, + data=get_utf8_value(data)) + body = response.read() + if response.status == 409: + raise self.provider.storage_create_error( + response.status, response.reason, body) + if response.status == 200: + return self.bucket_class(self, bucket_name) + else: + raise self.provider.storage_response_error( + response.status, response.reason, body) + + def get_bucket(self, bucket_name, validate=True, headers=None): + """ + Retrieves a bucket by name. + + If the bucket does not exist, an ``S3ResponseError`` will be raised. If + you are unsure if the bucket exists or not, you can use the + ``S3Connection.lookup`` method, which will either return a valid bucket + or ``None``. + + :type bucket_name: string + :param bucket_name: The name of the bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to + GCS. + + :type validate: boolean + :param validate: If ``True``, it will try to fetch all keys within the + given bucket.
(Default: ``True``) + """ + bucket = self.bucket_class(self, bucket_name) + if validate: + bucket.get_all_keys(headers, maxkeys=0) + return bucket diff --git a/ext/boto/gs/cors.py b/ext/boto/gs/cors.py new file mode 100644 index 0000000000..1c5cfd0c7b --- /dev/null +++ b/ext/boto/gs/cors.py @@ -0,0 +1,169 @@ +# Copyright 2012 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + import types + from boto.gs.user import User + from boto.exception import InvalidCorsError + from xml.sax import handler + + # Relevant tags for the CORS XML document. + CORS_CONFIG = 'CorsConfig' + CORS = 'Cors' + ORIGINS = 'Origins' + ORIGIN = 'Origin' + METHODS = 'Methods' + METHOD = 'Method' + HEADERS = 'ResponseHeaders' + HEADER = 'ResponseHeader' + MAXAGESEC = 'MaxAgeSec' + + class Cors(handler.ContentHandler): + """Encapsulates the CORS configuration XML document""" + def __init__(self): + # List of CORS elements found within a CorsConfig element. + self.cors = [] + # List of collections (e.g. Methods, ResponseHeaders, Origins) + # found within a CORS element. We use a list of lists here + # instead of a dictionary because the collections need to be + # preserved in the order in which they appear in the input XML + # document (and Python dictionary keys are inherently unordered). + # The elements on this list are two element tuples of the form + # (collection name, [list of collection contents]). + self.collections = [] + # Lists of elements within a collection. Again a list is needed to + # preserve ordering but also because the same element may appear + # multiple times within a collection. + self.elements = [] + # Dictionary mapping supported collection names to element types + # which may be contained within each. + self.legal_collections = { + ORIGINS : [ORIGIN], + METHODS : [METHOD], + HEADERS : [HEADER], + MAXAGESEC: [] + } + # List of supported element types within any collection, used for + # checking validity of a parsed element name.
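+ # Note that MAXAGESEC is absent here: legal_collections above maps it + # to an empty list, marking it as a scalar-valued collection whose text + # value is handled directly in endElement rather than via sub-elements.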
+ self.legal_elements = [ORIGIN, METHOD, HEADER] + + self.parse_level = 0 + self.collection = None + self.element = None + + def validateParseLevel(self, tag, level): + """Verify parse level for a given tag.""" + if self.parse_level != level: + raise InvalidCorsError('Invalid tag %s at parse level %d: ' % + (tag, self.parse_level)) + + def startElement(self, name, attrs, connection): + """SAX XML logic for parsing new element found.""" + if name == CORS_CONFIG: + self.validateParseLevel(name, 0) + self.parse_level += 1 + elif name == CORS: + self.validateParseLevel(name, 1) + self.parse_level += 1 + elif name in self.legal_collections: + self.validateParseLevel(name, 2) + self.parse_level += 1 + self.collection = name + elif name in self.legal_elements: + self.validateParseLevel(name, 3) + # Make sure this tag is found inside a collection tag. + if self.collection is None: + raise InvalidCorsError('Tag %s found outside collection' % name) + # Make sure this tag is allowed for the current collection tag. + if name not in self.legal_collections[self.collection]: + raise InvalidCorsError('Tag %s not allowed in %s collection' % + (name, self.collection)) + self.element = name + else: + raise InvalidCorsError('Unsupported tag ' + name) + + def endElement(self, name, value, connection): + """SAX XML logic for parsing the end of an element.""" + if name == CORS_CONFIG: + self.validateParseLevel(name, 1) + self.parse_level -= 1 + elif name == CORS: + self.validateParseLevel(name, 2) + self.parse_level -= 1 + # Terminating a CORS element, save any collections we found + # and re-initialize collections list. + self.cors.append(self.collections) + self.collections = [] + elif name in self.legal_collections: + self.validateParseLevel(name, 3) + if name != self.collection: + raise InvalidCorsError('Mismatched start and end tags (%s/%s)' % + (self.collection, name)) + self.parse_level -= 1 + if not self.legal_collections[name]: + # If this collection doesn't contain any sub-elements, store + # a tuple of name and this tag's element value. + self.collections.append((name, value.strip())) + else: + # Otherwise, we're terminating a collection of sub-elements, + # so store a tuple of name and list of contained elements. + self.collections.append((name, self.elements)) + self.elements = [] + self.collection = None + elif name in self.legal_elements: + self.validateParseLevel(name, 3) + # Make sure this tag is found inside a collection tag. + if self.collection is None: + raise InvalidCorsError('Tag %s found outside collection' % name) + # Make sure this end tag is allowed for the current collection tag. + if name not in self.legal_collections[self.collection]: + raise InvalidCorsError('Tag %s not allowed in %s collection' % + (name, self.collection)) + if name != self.element: + raise InvalidCorsError('Mismatched start and end tags (%s/%s)' % + (self.element, name)) + # Terminating an element tag, add it to the list of elements + # for the current collection. + self.elements.append((name, value.strip())) + self.element = None + else: + raise InvalidCorsError('Unsupported end tag ' + name) + + def to_xml(self): + """Convert CORS object into XML string representation.""" + s = '<' + CORS_CONFIG + '>' + for collections in self.cors: + s += '<' + CORS + '>' + for (collection, elements_or_value) in collections: + assert collection is not None + s += '<' + collection + '>' + # If collection elements has type string, append atomic value, + # otherwise, append sequence of values in named tags.
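+ # For example (hypothetical values), ('MaxAgeSec', '1800') is emitted + # as <MaxAgeSec>1800</MaxAgeSec>, while ('Origins', [('Origin', + # 'http://origin.example.com')]) is emitted as nested Origins/Origin tags.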
+ if isinstance(elements_or_value, str): + s += elements_or_value + else: + for (name, value) in elements_or_value: + assert name is not None + assert value is not None + s += '<' + name + '>' + value + '</' + name + '>' + s += '</' + collection + '>' + s += '</' + CORS + '>' + s += '</' + CORS_CONFIG + '>' + return s diff --git a/ext/boto/gs/key.py b/ext/boto/gs/key.py new file mode 100644 index 0000000000..f1ea3e1605 --- /dev/null +++ b/ext/boto/gs/key.py @@ -0,0 +1,948 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + import base64 + import binascii + import os + import re + + from boto.compat import StringIO + from boto.exception import BotoClientError + from boto.s3.key import Key as S3Key + from boto.s3.keyfile import KeyFile + from boto.utils import compute_hash + from boto.utils import get_utf8_value + + class Key(S3Key): + """ + Represents a key (object) in a GS bucket. + + :ivar bucket: The parent :class:`boto.gs.bucket.Bucket`. + :ivar name: The name of this Key object. + :ivar metadata: A dictionary containing user metadata that you + wish to store with the object or that has been retrieved from + an existing object. + :ivar cache_control: The value of the `Cache-Control` HTTP header. + :ivar content_type: The value of the `Content-Type` HTTP header. + :ivar content_encoding: The value of the `Content-Encoding` HTTP header. + :ivar content_disposition: The value of the `Content-Disposition` HTTP + header. + :ivar content_language: The value of the `Content-Language` HTTP header. + :ivar etag: The `etag` associated with this object. + :ivar last_modified: The string timestamp representing the last + time this object was modified in GS. + :ivar owner: The ID of the owner of this object. + :ivar storage_class: The storage class of the object. Currently, one of: + STANDARD | DURABLE_REDUCED_AVAILABILITY. + :ivar md5: The MD5 hash of the contents of the object. + :ivar size: The size, in bytes, of the object. + :ivar generation: The generation number of the object. + :ivar metageneration: The generation number of the object metadata. + :ivar encrypted: Whether the object is encrypted while at rest on + the server. + :ivar cloud_hashes: Dictionary of checksums as supplied by the storage + provider.
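+ (After a transfer this typically carries 'crc32c' and 'md5' entries + decoded from the x-goog-hash response header; see handle_addl_headers + below.)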
+ """ + + def __init__(self, bucket=None, name=None, generation=None): + super(Key, self).__init__(bucket=bucket, name=name) + self.generation = generation + self.meta_generation = None + self.cloud_hashes = {} + self.component_count = None + + def __repr__(self): + if self.generation and self.metageneration: + ver_str = '#%s.%s' % (self.generation, self.metageneration) + else: + ver_str = '' + if self.bucket: + return '' % (self.bucket.name, self.name, ver_str) + else: + return '' % (self.name, ver_str) + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'ETag': + self.etag = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Size': + self.size = int(value) + elif name == 'StorageClass': + self.storage_class = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + elif name == 'Generation': + self.generation = value + elif name == 'MetaGeneration': + self.metageneration = value + else: + setattr(self, name, value) + + def handle_version_headers(self, resp, force=False): + self.metageneration = resp.getheader('x-goog-metageneration', None) + self.generation = resp.getheader('x-goog-generation', None) + + def handle_restore_headers(self, response): + return + + def handle_addl_headers(self, headers): + for key, value in headers: + if key == 'x-goog-hash': + for hash_pair in value.split(','): + alg, b64_digest = hash_pair.strip().split('=', 1) + self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest) + elif key == 'x-goog-component-count': + self.component_count = int(value) + elif key == 'x-goog-generation': + self.generation = value + # Use x-goog-stored-content-encoding and + # x-goog-stored-content-length to indicate original content length + # and encoding, which are transcoding-invariant (so are preferable + # over using content-encoding and size headers). + elif key == 'x-goog-stored-content-encoding': + self.content_encoding = value + elif key == 'x-goog-stored-content-length': + self.size = int(value) + elif key == 'x-goog-storage-class': + self.storage_class = value + + def open_read(self, headers=None, query_args='', + override_num_retries=None, response_headers=None): + """ + Open this key for reading + + :type headers: dict + :param headers: Headers to pass in the web request + + :type query_args: string + :param query_args: Arguments to pass in the query string + (ie, 'torrent') + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying GET. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + """ + # For GCS we need to include the object generation in the query args. + # The rest of the processing is handled in the parent class. 
+ if self.generation: + if query_args: + query_args += '&' + query_args += 'generation=%s' % self.generation + super(Key, self).open_read(headers=headers, query_args=query_args, + override_num_retries=override_num_retries, + response_headers=response_headers) + + def get_file(self, fp, headers=None, cb=None, num_cb=10, + torrent=False, version_id=None, override_num_retries=None, + response_headers=None, hash_algs=None): + query_args = None + if self.generation: + query_args = ['generation=%s' % self.generation] + self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, + override_num_retries=override_num_retries, + response_headers=response_headers, + hash_algs=hash_algs, + query_args=query_args) + + def get_contents_to_file(self, fp, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + res_download_handler=None, + response_headers=None, + hash_algs=None): + """ + Retrieve an object from GCS using the name of the Key object as the + key in GCS. Write the contents of the object to the file pointed + to by 'fp'. + + :type fp: File-like object + :param fp: + + :type headers: dict + :param headers: additional HTTP headers that will be sent with + the GET request. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the download. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to GCS and + the second representing the size of the to be transmitted + object. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent + file as a string. + + :type res_download_handler: ResumableDownloadHandler + :param res_download_handler: If provided, this handler will + perform the download. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/sMkcC for details. + """ + if self.bucket is not None: + if res_download_handler: + res_download_handler.get_file(self, fp, headers, cb, num_cb, + torrent=torrent, + version_id=version_id, + hash_algs=hash_algs) + else: + self.get_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + response_headers=response_headers, + hash_algs=hash_algs) + + def compute_hash(self, fp, algorithm, size=None): + """ + :type fp: file + :param fp: File pointer to the file to hash. The file + pointer will be reset to the same position before the + method returns. + + :type algorithm: zero-argument constructor for hash objects that + implements update() and digest() (e.g. hashlib.md5) + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where the file is being split + in place into different parts. Less bytes may be available. + """ + hex_digest, b64_digest, data_size = compute_hash( + fp, size=size, hash_algorithm=algorithm) + # The internal implementation of compute_hash() needs to return the + # data size, but we don't want to return that value to the external + # caller because it changes the class interface (i.e.
it might + # break some code), so we consume the third tuple value here and + # return the remainder of the tuple to the caller, thereby preserving + # the existing interface. + self.size = data_size + return (hex_digest, b64_digest) + + def send_file(self, fp, headers=None, cb=None, num_cb=10, + query_args=None, chunked_transfer=False, size=None, + hash_algs=None): + """ + Upload a file to GCS. + + :type fp: file + :param fp: The file pointer to upload. The file pointer must + point at the offset from which you wish to upload. + ie. if uploading the full file, it should point at the + start of the file. Normally when a file is opened for + reading, the fp will point at the first byte. See the + bytes parameter below for more info. + + :type headers: dict + :param headers: The headers to pass along with the PUT request + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file + transfer. Providing a negative integer will cause your + callback to be called with each buffer read. + + :type query_args: string + :param query_args: Arguments to pass in the query string. + + :type chunked_transfer: boolean + :param chunked_transfer: (optional) If true, we use chunked + Transfer-Encoding. + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where you are splitting the file + up into different ranges to be uploaded. If not specified, + the default behaviour is to read all bytes from the file + pointer. Less bytes may be available. + + :type hash_algs: dictionary + :param hash_algs: (optional) Dictionary of hash algorithms and + corresponding hashing class that implements update() and digest(). + Defaults to {'md5': hashlib.md5}. + """ + self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, + query_args=query_args, + chunked_transfer=chunked_transfer, size=size, + hash_algs=hash_algs) + + def delete(self, headers=None): + return self.bucket.delete_key(self.name, version_id=self.version_id, + generation=self.generation, + headers=headers) + + def add_email_grant(self, permission, email_address): + """ + Convenience method that provides a quick way to add an email grant to a + key. This method retrieves the current ACL, creates a new grant based on + the parameters passed in, adds that grant to the ACL and then PUT's the + new ACL back to GS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type email_address: string + :param email_address: The email address associated with the Google + account to which you are granting the permission. + """ + acl = self.get_acl() + acl.add_email_grant(permission, email_address) + self.set_acl(acl) + + def add_user_grant(self, permission, user_id): + """ + Convenience method that provides a quick way to add a canonical user + grant to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUT's the new ACL back to GS. + + :type permission: string + :param permission: The permission being granted. 
Should be one of: + READ|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type user_id: string + :param user_id: The canonical user id associated with the GS account to + which you are granting the permission. + """ + acl = self.get_acl() + acl.add_user_grant(permission, user_id) + self.set_acl(acl) + + def add_group_email_grant(self, permission, email_address, headers=None): + """ + Convenience method that provides a quick way to add an email group + grant to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUT's the new ACL back to GS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type email_address: string + :param email_address: The email address associated with the Google + Group to which you are granting the permission. + """ + acl = self.get_acl(headers=headers) + acl.add_group_email_grant(permission, email_address) + self.set_acl(acl, headers=headers) + + def add_group_grant(self, permission, group_id): + """ + Convenience method that provides a quick way to add a canonical group + grant to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL and + then PUT's the new ACL back to GS. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|FULL_CONTROL + See http://code.google.com/apis/storage/docs/developer-guide.html#authorization + for more details on permissions. + + :type group_id: string + :param group_id: The canonical group id associated with the Google + Groups account you are granting the permission to. + """ + acl = self.get_acl() + acl.add_group_grant(permission, group_id) + self.set_acl(acl) + + def set_contents_from_file(self, fp, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + res_upload_handler=None, size=None, rewind=False, + if_generation=None): + """ + Store an object in GS using the name of the Key object as the + key in GS and the contents of the file pointed to by 'fp' as the + contents. + + :type fp: file + :param fp: The file whose contents are to be uploaded. + + :type headers: dict + :param headers: (optional) Additional HTTP headers to be sent with the + PUT request. + + :type replace: bool + :param replace: (optional) If this parameter is False, the method will + first check to see if an object exists in the bucket with the same + key. If it does, it won't overwrite it. The default value is True + which will overwrite the object. + + :type cb: function + :param cb: (optional) Callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the cb + parameter, this parameter determines the granularity of the callback + by defining the maximum number of times the callback will be called + during the file transfer. 
+ + :type policy: :class:`boto.gs.acl.CannedACLStrings` + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. + + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. + + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. + + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. + + :type size: int + :param size: (optional) The Maximum number of bytes to read from the + file pointer (fp). This is useful when uploading a file in multiple + parts where you are splitting the file up into different ranges to + be uploaded. If not specified, the default behaviour is to read all + bytes from the file pointer. Less bytes may be available. + + Notes: + + 1. The "size" parameter currently cannot be used when a + resumable upload handler is given but is still useful for + uploading part of a file as implemented by the parent class. + 2. At present Google Cloud Storage does not support multipart + uploads. + + :type rewind: bool + :param rewind: (optional) If True, the file pointer (fp) will be + rewound to the start before any bytes are read from it. The default + behaviour is False which reads from the current position of the + file pointer (fp). + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the + object will only be written to if its current generation number is + this value. If set to the value 0, the object will only be written + if it doesn't already exist. + + :rtype: int + :return: The number of bytes written to the key. + + TODO: At some point we should refactor the Bucket and Key classes, + to move functionality common to all providers into a parent class, + and provider-specific functionality into subclasses (rather than + just overriding/sharing code the way it currently works). + """ + provider = self.bucket.connection.provider + if res_upload_handler and size: + # could use size instead of file_length if provided but... + raise BotoClientError( + '"size" param not supported for resumable uploads.') + headers = headers or {} + if policy: + headers[provider.acl_header] = policy + + if rewind: + # caller requests reading from beginning of fp. + fp.seek(0, os.SEEK_SET) + else: + # The following seek/tell/seek logic is intended + # to detect applications using the older interface to + # set_contents_from_file(), which automatically rewound the + # file each time the Key was reused. This changed with commit + # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads + # split into multiple parts and uploaded in parallel, and at + # the time of that commit this check was added because otherwise + # older programs would get a success status and upload an empty + # object. Unfortunately, it's very inefficient for fp's implemented + # by KeyFile (used, for example, by gsutil when copying between + # providers). So, we skip the check for the KeyFile case.
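+ # (Mechanically: the code below records fp.tell(), seeks to EOF, and + # if the position is unchanged -- i.e. fp already sits at EOF with + # data before it -- raises AttributeError rather than silently + # uploading an empty object.)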
+ # TODO: At some point consider removing this seek/tell/seek + # logic, after enough time has passed that it's unlikely any + # programs remain that assume the older auto-rewind interface. + if not isinstance(fp, KeyFile): + spos = fp.tell() + fp.seek(0, os.SEEK_END) + if fp.tell() == spos: + fp.seek(0, os.SEEK_SET) + if fp.tell() != spos: + # Raise an exception as this is likely a programming + # error whereby there is data before the fp but nothing + # after it. + fp.seek(spos) + raise AttributeError('fp is at EOF. Use rewind option ' + 'or seek() to data start.') + # seek back to the correct position. + fp.seek(spos) + + if hasattr(fp, 'name'): + self.path = fp.name + if self.bucket is not None: + if isinstance(fp, KeyFile): + # Avoid EOF seek for KeyFile case as it's very inefficient. + key = fp.getkey() + size = key.size - fp.tell() + self.size = size + # At present both GCS and S3 use MD5 for the etag for + # non-multipart-uploaded objects. If the etag is 32 hex + # chars use it as an MD5, to avoid having to read the file + # twice while transferring. + if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)): + etag = key.etag.strip('"') + md5 = (etag, base64.b64encode(binascii.unhexlify(etag))) + if size: + self.size = size + else: + # If md5 is provided, still need to size so + # calculate based on bytes to end of content + spos = fp.tell() + fp.seek(0, os.SEEK_END) + self.size = fp.tell() - spos + fp.seek(spos) + size = self.size + + if md5 is None: + md5 = self.compute_md5(fp, size) + self.md5 = md5[0] + self.base64md5 = md5[1] + + if self.name is None: + self.name = self.md5 + + if not replace: + if self.bucket.lookup(self.name): + return + + if if_generation is not None: + headers['x-goog-if-generation-match'] = str(if_generation) + + if res_upload_handler: + res_upload_handler.send_file(self, fp, headers, cb, num_cb) + else: + # Not a resumable transfer so use basic send_file mechanism. + self.send_file(fp, headers, cb, num_cb, size=size) + + def set_contents_from_filename(self, filename, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=None, + res_upload_handler=None, + if_generation=None): + """ + Store an object in GS using the name of the Key object as the + key in GS and the contents of the file named by 'filename'. + See set_contents_from_file method for details about the + parameters. + + :type filename: string + :param filename: The name of the file that you want to put onto GS. + + :type headers: dict + :param headers: (optional) Additional headers to pass along with the + request to GS. + + :type replace: bool + :param replace: (optional) If True, replaces the contents of the file + if it already exists. + + :type cb: function + :param cb: (optional) Callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the cb + parameter this parameter determines the granularity of the callback + by defining the maximum number of times the callback will be called + during the file transfer. + + :type policy: :py:attribute:`boto.gs.acl.CannedACLStrings` + :param policy: (optional) A canned ACL policy that will be applied to + the new key in GS. 
+ + :type md5: tuple + :param md5: (optional) A tuple containing the hexdigest version of the + MD5 checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the second element. + This is the same format returned by the compute_md5 method. + + If you need to compute the MD5 for any reason prior to upload, it's + silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be + computed. + + :type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler` + :param res_upload_handler: (optional) If provided, this handler will + perform the upload. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the + object will only be written to if its current generation number is + this value. If set to the value 0, the object will only be written + if it doesn't already exist. + """ + # Clear out any previously computed hashes, since we are setting the + # content. + self.local_hashes = {} + + with open(filename, 'rb') as fp: + self.set_contents_from_file(fp, headers, replace, cb, num_cb, + policy, md5, res_upload_handler, + if_generation=if_generation) + + def set_contents_from_string(self, s, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + if_generation=None): + """ + Store an object in GCS using the name of the Key object as the + key in GCS and the string 's' as the contents. + See set_contents_from_file method for details about the + parameters. + + :type headers: dict + :param headers: Additional headers to pass along with the + request to GCS. + + :type replace: bool + :param replace: If True, replaces the contents of the file if + it already exists. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept + two integer parameters, the first representing the + number of bytes that have been successfully + transmitted to GCS and the second representing the + size of the to be transmitted object. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with + the cb parameter this parameter determines the + granularity of the callback by defining + the maximum number of times the callback will + be called during the file transfer. + + :type policy: :class:`boto.gs.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key in GCS. + + :type md5: A tuple containing the hexdigest version of the MD5 + checksum of the file as the first element and the + Base64-encoded version of the plain checksum as the + second element. This is the same format returned by + the compute_md5 method. + :param md5: If you need to compute the MD5 for any reason prior + to upload, it's silly to have to do it twice so this + param, if present, will be used as the MD5 values + of the file. Otherwise, the checksum will be computed. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the + object will only be written to if its current generation number is + this value. If set to the value 0, the object will only be written + if it doesn't already exist. + """ + + # Clear out any previously computed md5 hashes, since we are setting the content.
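+ # (They are recomputed from the new contents by set_contents_from_file, + # which calls compute_md5 when no md5 tuple is supplied.)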
+ self.md5 = None + self.base64md5 = None + + fp = StringIO(get_utf8_value(s)) + r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, + policy, md5, + if_generation=if_generation) + fp.close() + return r + + def set_contents_from_stream(self, *args, **kwargs): + """ + Store an object using the name of the Key object as the key in + cloud and the contents of the data stream pointed to by 'fp' as + the contents. + + The stream object is not seekable and total size is not known. + This has the implication that we can't specify the + Content-Size and Content-MD5 in the header. So for huge + uploads, the delay in calculating MD5 is avoided but with a + penalty of inability to verify the integrity of the uploaded + data. + + :type fp: file + :param fp: the file whose contents are to be uploaded + + :type headers: dict + :param headers: additional HTTP headers to be sent with the + PUT request. + + :type replace: bool + :param replace: If this parameter is False, the method will first check + to see if an object exists in the bucket with the same key. If it + does, it won't overwrite it. The default value is True which will + overwrite the object. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter, this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type policy: :class:`boto.gs.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key + in GS. + + :type size: int + :param size: (optional) The Maximum number of bytes to read from + the file pointer (fp). This is useful when uploading a + file in multiple parts where you are splitting the file up + into different ranges to be uploaded. If not specified, + the default behaviour is to read all bytes from the file + pointer. Less bytes may be available. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the + object will only be written to if its current generation number is + this value. If set to the value 0, the object will only be written + if it doesn't already exist. + """ + if_generation = kwargs.pop('if_generation', None) + if if_generation is not None: + headers = kwargs.get('headers', {}) + headers['x-goog-if-generation-match'] = str(if_generation) + kwargs['headers'] = headers + super(Key, self).set_contents_from_stream(*args, **kwargs) + + def set_acl(self, acl_or_str, headers=None, generation=None, + if_generation=None, if_metageneration=None): + """Sets the ACL for this object. + + :type acl_or_str: string or :class:`boto.gs.acl.ACL` + :param acl_or_str: A canned ACL string (see + :data:`~.gs.acl.CannedACLStrings`) or an ACL object. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. 
+ + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if self.bucket is not None: + self.bucket.set_acl(acl_or_str, self.name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def get_acl(self, headers=None, generation=None): + """Returns the ACL of this object. + + :param dict headers: Additional headers to set during the request. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. + + :rtype: :class:`.gs.acl.ACL` + """ + if self.bucket is not None: + return self.bucket.get_acl(self.name, headers=headers, + generation=generation) + + def get_xml_acl(self, headers=None, generation=None): + """Returns the ACL string of this object. + + :param dict headers: Additional headers to set during the request. + + :param int generation: If specified, gets the ACL for a specific + generation of a versioned object. If not specified, the current + version is returned. + + :rtype: str + """ + if self.bucket is not None: + return self.bucket.get_xml_acl(self.name, headers=headers, + generation=generation) + + def set_xml_acl(self, acl_str, headers=None, generation=None, + if_generation=None, if_metageneration=None): + """Sets this objects's ACL to an XML string. + + :type acl_str: string + :param acl_str: A string containing the ACL XML. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. + """ + if self.bucket is not None: + return self.bucket.set_xml_acl(acl_str, self.name, headers=headers, + generation=generation, + if_generation=if_generation, + if_metageneration=if_metageneration) + + def set_canned_acl(self, acl_str, headers=None, generation=None, + if_generation=None, if_metageneration=None): + """Sets this objects's ACL using a predefined (canned) value. + + :type acl_str: string + :param acl_str: A canned ACL string. See + :data:`~.gs.acl.CannedACLStrings`. + + :type headers: dict + :param headers: Additional headers to set during the request. + + :type generation: int + :param generation: If specified, sets the ACL for a specific generation + of a versioned object. If not specified, the current version is + modified. + + :type if_generation: int + :param if_generation: (optional) If set to a generation number, the acl + will only be updated if its current generation number is this value. + + :type if_metageneration: int + :param if_metageneration: (optional) If set to a metageneration number, + the acl will only be updated if its current metageneration number is + this value. 
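+
+        A hypothetical usage sketch (bucket and object names invented)::
+
+            key = bucket.get_key('reports/2017.csv')
+            # Apply a predefined ACL only if the object's metageneration
+            # still matches what we last read:
+            key.set_canned_acl('public-read', if_metageneration=3)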
+        """
+        if self.bucket is not None:
+            return self.bucket.set_canned_acl(
+                acl_str,
+                self.name,
+                headers=headers,
+                generation=generation,
+                if_generation=if_generation,
+                if_metageneration=if_metageneration
+            )
+
+    def compose(self, components, content_type=None, headers=None):
+        """Create a new object from a sequence of existing objects.
+
+        The content of the object representing this Key will be the
+        concatenation of the given object sequence. For more detail, visit
+
+            https://developers.google.com/storage/docs/composite-objects
+
+        :type components: list of :class:`boto.gs.key.Key`
+        :param components: List of gs.Keys representing the component objects.
+
+        :type content_type: string
+        :param content_type: (optional) Content type for the new composite
+            object.
+        """
+        compose_req = []
+        for key in components:
+            if key.bucket.name != self.bucket.name:
+                raise BotoClientError(
+                    'GCS does not support inter-bucket composing')
+
+            generation_tag = ''
+            if key.generation:
+                generation_tag = ('<Generation>%s</Generation>'
+                                  % str(key.generation))
+            compose_req.append('<Component><Name>%s</Name>%s</Component>' %
+                               (key.name, generation_tag))
+        compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
+                           ''.join(compose_req))
+        headers = headers or {}
+        if content_type:
+            headers['Content-Type'] = content_type
+        resp = self.bucket.connection.make_request(
+            'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
+            headers=headers, query_args='compose',
+            data=get_utf8_value(compose_req_xml))
+        if resp.status < 200 or resp.status > 299:
+            raise self.bucket.connection.provider.storage_response_error(
+                resp.status, resp.reason, resp.read())
+
+        # Return the generation so that the result URI can be built with this
+        # for automatic parallel uploads.
+        return resp.getheader('x-goog-generation')
diff --git a/ext/boto/gs/lifecycle.py b/ext/boto/gs/lifecycle.py
new file mode 100644
index 0000000000..8b83890806
--- /dev/null
+++ b/ext/boto/gs/lifecycle.py
@@ -0,0 +1,224 @@
+# Copyright 2013 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import InvalidLifecycleConfigError
+
+# Relevant tags for the lifecycle configuration XML document.
+LIFECYCLE_CONFIG = 'LifecycleConfiguration'
+RULE = 'Rule'
+ACTION = 'Action'
+DELETE = 'Delete'
+SET_STORAGE_CLASS = 'SetStorageClass'
+CONDITION = 'Condition'
+AGE = 'Age'
+CREATED_BEFORE = 'CreatedBefore'
+NUM_NEWER_VERSIONS = 'NumberOfNewerVersions'
+IS_LIVE = 'IsLive'
+MATCHES_STORAGE_CLASS = 'MatchesStorageClass'
+
+# List of all action elements.
+LEGAL_ACTIONS = [DELETE, SET_STORAGE_CLASS]
+# List of all condition elements.
+LEGAL_CONDITIONS = [AGE, CREATED_BEFORE, NUM_NEWER_VERSIONS, IS_LIVE, + MATCHES_STORAGE_CLASS] +# List of conditions elements that may be repeated. +LEGAL_REPEATABLE_CONDITIONS = [MATCHES_STORAGE_CLASS] + +class Rule(object): + """ + A lifecycle rule for a bucket. + + :ivar action: Action to be taken. + + :ivar action_text: The text value for the specified action, if any. + + :ivar conditions: A dictionary of conditions that specify when the action + should be taken. Each item in the dictionary represents the name and + value (or a list of multiple values, if applicable) of a condition. + """ + + def __init__(self, action=None, action_text=None, conditions=None): + self.action = action + self.action_text = action_text + self.conditions = conditions or {} + + # Name of the current enclosing tag (used to validate the schema). + self.current_tag = RULE + + def validateStartTag(self, tag, parent): + """Verify parent of the start tag.""" + if self.current_tag != parent: + raise InvalidLifecycleConfigError( + 'Invalid tag %s found inside %s tag' % (tag, self.current_tag)) + + def validateEndTag(self, tag): + """Verify end tag against the start tag.""" + if tag != self.current_tag: + raise InvalidLifecycleConfigError( + 'Mismatched start and end tags (%s/%s)' % + (self.current_tag, tag)) + + def startElement(self, name, attrs, connection): + if name == ACTION: + self.validateStartTag(name, RULE) + elif name in LEGAL_ACTIONS: + self.validateStartTag(name, ACTION) + # Verify there is only one action tag in the rule. + if self.action is not None: + raise InvalidLifecycleConfigError( + 'Only one action tag is allowed in each rule') + self.action = name + elif name == CONDITION: + self.validateStartTag(name, RULE) + elif name in LEGAL_CONDITIONS: + self.validateStartTag(name, CONDITION) + # Verify there is no duplicate conditions. + if (name in self.conditions and + name not in LEGAL_REPEATABLE_CONDITIONS): + raise InvalidLifecycleConfigError( + 'Found duplicate non-repeatable conditions %s' % name) + else: + raise InvalidLifecycleConfigError('Unsupported tag ' + name) + self.current_tag = name + + def endElement(self, name, value, connection): + self.validateEndTag(name) + if name == RULE: + # We have to validate the rule after it is fully populated because + # the action and condition elements could be in any order. + self.validate() + elif name == ACTION: + self.current_tag = RULE + elif name in LEGAL_ACTIONS: + if name == SET_STORAGE_CLASS and value is not None: + self.action_text = value.strip() + self.current_tag = ACTION + elif name == CONDITION: + self.current_tag = RULE + elif name in LEGAL_CONDITIONS: + self.current_tag = CONDITION + # Some conditions specify a list of values. 
+            if name in LEGAL_REPEATABLE_CONDITIONS:
+                if name not in self.conditions:
+                    self.conditions[name] = []
+                self.conditions[name].append(value.strip())
+            else:
+                self.conditions[name] = value.strip()
+        else:
+            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
+
+    def validate(self):
+        """Validate the rule."""
+        if not self.action:
+            raise InvalidLifecycleConfigError(
+                'No action was specified in the rule')
+        if not self.conditions:
+            raise InvalidLifecycleConfigError(
+                'No condition was specified for action %s' % self.action)
+
+    def to_xml(self):
+        """Convert the rule into XML string representation."""
+        s = ['<' + RULE + '>']
+        s.append('<' + ACTION + '>')
+        if self.action_text:
+            s.extend(['<' + self.action + '>',
+                      self.action_text,
+                      '</' + self.action + '>'])
+        else:
+            s.append('<' + self.action + '/>')
+        s.append('</' + ACTION + '>')
+        s.append('<' + CONDITION + '>')
+        for condition_name in self.conditions:
+            if condition_name not in LEGAL_CONDITIONS:
+                continue
+            if condition_name in LEGAL_REPEATABLE_CONDITIONS:
+                condition_values = self.conditions[condition_name]
+            else:
+                # Wrap condition value in a list, allowing us to iterate over
+                # all condition values using the same logic.
+                condition_values = [self.conditions[condition_name]]
+            for condition_value in condition_values:
+                s.extend(['<' + condition_name + '>',
+                          condition_value,
+                          '</' + condition_name + '>'])
+        s.append('</' + CONDITION + '>')
+        s.append('</' + RULE + '>')
+        return ''.join(s)
+
+class LifecycleConfig(list):
+    """
+    A container of rules associated with a lifecycle configuration.
+    """
+
+    def __init__(self):
+        # Track if root tag has been seen.
+        self.has_root_tag = False
+
+    def startElement(self, name, attrs, connection):
+        if name == LIFECYCLE_CONFIG:
+            if self.has_root_tag:
+                raise InvalidLifecycleConfigError(
+                    'Only one root tag is allowed in the XML')
+            self.has_root_tag = True
+        elif name == RULE:
+            if not self.has_root_tag:
+                raise InvalidLifecycleConfigError('Invalid root tag ' + name)
+            rule = Rule()
+            self.append(rule)
+            return rule
+        else:
+            raise InvalidLifecycleConfigError('Unsupported tag ' + name)
+
+    def endElement(self, name, value, connection):
+        if name == LIFECYCLE_CONFIG:
+            pass
+        else:
+            raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
+
+    def to_xml(self):
+        """Convert LifecycleConfig object into XML string representation."""
+        s = ['<?xml version="1.0" encoding="UTF-8"?>']
+        s.append('<' + LIFECYCLE_CONFIG + '>')
+        for rule in self:
+            s.append(rule.to_xml())
+        s.append('</' + LIFECYCLE_CONFIG + '>')
+        return ''.join(s)
+
+    def add_rule(self, action, action_text, conditions):
+        """
+        Add a rule to this Lifecycle configuration. This only adds the rule to
+        the local copy. To install the new rule(s) on the bucket, you need to
+        pass this Lifecycle config object to the configure_lifecycle method of
+        the Bucket object.
+
+        :type action: str
+        :param action: Action to be taken.
+
+        :type action_text: str
+        :param action_text: Value for the specified action.
+
+        :type conditions: dict
+        :param conditions: A dictionary of conditions that specify when the
+            action should be taken. Each item in the dictionary represents the name
+            and value of a condition.
+        """
+        rule = Rule(action, action_text, conditions)
+        self.append(rule)
diff --git a/ext/boto/gs/resumable_upload_handler.py b/ext/boto/gs/resumable_upload_handler.py
new file mode 100644
index 0000000000..d74434693d
--- /dev/null
+++ b/ext/boto/gs/resumable_upload_handler.py
@@ -0,0 +1,679 @@
+# Copyright 2010 Google Inc.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import errno +import httplib +import os +import random +import re +import socket +import time +import urlparse +from hashlib import md5 +from boto import config, UserAgent +from boto.connection import AWSAuthConnection +from boto.exception import InvalidUriError +from boto.exception import ResumableTransferDisposition +from boto.exception import ResumableUploadException +from boto.s3.keyfile import KeyFile + +""" +Handler for Google Cloud Storage resumable uploads. See +http://code.google.com/apis/storage/docs/developer-guide.html#resumable +for details. + +Resumable uploads will retry failed uploads, resuming at the byte +count completed by the last upload attempt. If too many retries happen with +no progress (per configurable num_retries param), the upload will be +aborted in the current process. + +The caller can optionally specify a tracker_file_name param in the +ResumableUploadHandler constructor. If you do this, that file will +save the state needed to allow retrying later, in a separate process +(e.g., in a later run of gsutil). +""" + + +class ResumableUploadHandler(object): + + BUFFER_SIZE = 8192 + RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error, + socket.gaierror) + + # (start, end) response indicating server has nothing (upload protocol uses + # inclusive numbering). + SERVER_HAS_NOTHING = (0, -1) + + def __init__(self, tracker_file_name=None, num_retries=None): + """ + Constructor. Instantiate once for each uploaded file. + + :type tracker_file_name: string + :param tracker_file_name: optional file name to save tracker URI. + If supplied and the current process fails the upload, it can be + retried in a new process. If called with an existing file containing + a valid tracker URI, we'll resume the upload from this URI; else + we'll start a new resumable upload (and write the URI to this + tracker file). + + :type num_retries: int + :param num_retries: the number of times we'll re-try a resumable upload + making no progress. (Count resets every time we get progress, so + upload can span many more than this number of retries.) + """ + self.tracker_file_name = tracker_file_name + self.num_retries = num_retries + self.server_has_bytes = 0 # Byte count at last server check. + self.tracker_uri = None + if tracker_file_name: + self._load_tracker_uri_from_file() + # Save upload_start_point in instance state so caller can find how + # much was transferred by this ResumableUploadHandler (across retries). 
+ self.upload_start_point = None + + def _load_tracker_uri_from_file(self): + f = None + try: + f = open(self.tracker_file_name, 'r') + uri = f.readline().strip() + self._set_tracker_uri(uri) + except IOError as e: + # Ignore non-existent file (happens first time an upload + # is attempted on a file), but warn user for other errors. + if e.errno != errno.ENOENT: + # Will restart because self.tracker_uri is None. + print('Couldn\'t read URI tracker file (%s): %s. Restarting ' + 'upload from scratch.' % + (self.tracker_file_name, e.strerror)) + except InvalidUriError as e: + # Warn user, but proceed (will restart because + # self.tracker_uri is None). + print('Invalid tracker URI (%s) found in URI tracker file ' + '(%s). Restarting upload from scratch.' % + (uri, self.tracker_file_name)) + finally: + if f: + f.close() + + def _save_tracker_uri_to_file(self): + """ + Saves URI to tracker file if one was passed to constructor. + """ + if not self.tracker_file_name: + return + f = None + try: + with os.fdopen(os.open(self.tracker_file_name, + os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f: + f.write(self.tracker_uri) + except IOError as e: + raise ResumableUploadException( + 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen' + 'if you\'re using an incorrectly configured upload tool\n' + '(e.g., gsutil configured to save tracker files to an ' + 'unwritable directory)' % + (self.tracker_file_name, e.strerror), + ResumableTransferDisposition.ABORT) + + def _set_tracker_uri(self, uri): + """ + Called when we start a new resumable upload or get a new tracker + URI for the upload. Saves URI and resets upload state. + + Raises InvalidUriError if URI is syntactically invalid. + """ + parse_result = urlparse.urlparse(uri) + if (parse_result.scheme.lower() not in ['http', 'https'] or + not parse_result.netloc): + raise InvalidUriError('Invalid tracker URI (%s)' % uri) + self.tracker_uri = uri + self.tracker_uri_host = parse_result.netloc + self.tracker_uri_path = '%s?%s' % ( + parse_result.path, parse_result.query) + self.server_has_bytes = 0 + + def get_tracker_uri(self): + """ + Returns upload tracker URI, or None if the upload has not yet started. + """ + return self.tracker_uri + + def get_upload_id(self): + """ + Returns the upload ID for the resumable upload, or None if the upload + has not yet started. + """ + # We extract the upload_id from the tracker uri. We could retrieve the + # upload_id from the headers in the response but this only works for + # the case where we get the tracker uri from the service. In the case + # where we get the tracker from the tracking file we need to do this + # logic anyway. + delim = '?upload_id=' + if self.tracker_uri and delim in self.tracker_uri: + return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):] + else: + return None + + def _remove_tracker_file(self): + if (self.tracker_file_name and + os.path.exists(self.tracker_file_name)): + os.unlink(self.tracker_file_name) + + def _build_content_range_header(self, range_spec='*', length_spec='*'): + return 'bytes %s/%s' % (range_spec, length_spec) + + def _query_server_state(self, conn, file_length): + """ + Queries server to find out state of given upload. + + Note that this method really just makes special case use of the + fact that the upload server always returns the current start/end + state whenever a PUT doesn't complete. + + Returns HTTP response from sending request. + + Raises ResumableUploadException if problem querying server. 
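+
+        For illustration, the probe for a 1 MiB file is an empty PUT with
+        roughly these headers (tracker URI elided)::
+
+            Content-Range: bytes */1048576
+            Content-Length: 0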
+        """
+        # Send an empty PUT so that server replies with this resumable
+        # transfer's state.
+        put_headers = {}
+        put_headers['Content-Range'] = (
+            self._build_content_range_header('*', file_length))
+        put_headers['Content-Length'] = '0'
+        return AWSAuthConnection.make_request(conn, 'PUT',
+                                              path=self.tracker_uri_path,
+                                              auth_path=self.tracker_uri_path,
+                                              headers=put_headers,
+                                              host=self.tracker_uri_host)
+
+    def _query_server_pos(self, conn, file_length):
+        """
+        Queries server to find out what bytes it currently has.
+
+        Returns (server_start, server_end), where the values are inclusive.
+        For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.
+
+        Raises ResumableUploadException if problem querying server.
+        """
+        resp = self._query_server_state(conn, file_length)
+        if resp.status == 200:
+            # To handle the boundary condition where the server has the complete
+            # file, we return (server_start, file_length-1). That way the
+            # calling code can always simply read up through server_end. (If we
+            # didn't handle this boundary condition here, the caller would have
+            # to check whether server_end == file_length and read one fewer byte
+            # in that case.)
+            return (0, file_length - 1)  # Completed upload.
+        if resp.status != 308:
+            # This means the server didn't have any state for the given
+            # upload ID, which can happen (for example) if the caller saved
+            # the tracker URI to a file and then tried to restart the transfer
+            # after that upload ID has gone stale. In that case we need to
+            # start a new transfer (and the caller will then save the new
+            # tracker URI to the tracker file).
+            raise ResumableUploadException(
+                'Got non-308 response (%s) from server state query' %
+                resp.status, ResumableTransferDisposition.START_OVER)
+        got_valid_response = False
+        range_spec = resp.getheader('range')
+        if range_spec:
+            # Parse 'bytes=<from>-<to>' range_spec.
+            m = re.search(r'bytes=(\d+)-(\d+)', range_spec)
+            if m:
+                server_start = long(m.group(1))
+                server_end = long(m.group(2))
+                got_valid_response = True
+        else:
+            # No Range header, which means the server does not yet have
+            # any bytes. Note that the Range header uses inclusive 'from'
+            # and 'to' values. Since Range 0-0 would mean that the server
+            # has byte 0, omitting the Range header is used to indicate that
+            # the server doesn't have any bytes.
+            return self.SERVER_HAS_NOTHING
+        if not got_valid_response:
+            raise ResumableUploadException(
+                'Couldn\'t parse upload server state query response (%s)' %
+                str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
+        if conn.debug >= 1:
+            print('Server has: Range: %d - %d.' % (server_start, server_end))
+        return (server_start, server_end)
+
+    def _start_new_resumable_upload(self, key, headers=None):
+        """
+        Starts a new resumable upload.
+
+        Raises ResumableUploadException if any errors occur.
+        """
+        conn = key.bucket.connection
+        if conn.debug >= 1:
+            print('Starting new resumable upload.')
+        self.server_has_bytes = 0
+
+        # Start a new resumable upload by sending a POST request with an
+        # empty body and the "X-Goog-Resumable: start" header. Include any
+        # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
+        # (and raise an exception if they tried to pass one, since it's
+        # a semantic error to specify it at this point, and if we were to
+        # include one now it would cause the server to expect that many
+        # bytes; the POST doesn't include the actual file bytes). We set
+        # the Content-Length in the subsequent PUT, based on the uploaded
+        # file size.
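+        #
+        # For illustration, the initiation exchange looks roughly like this
+        # (bucket/object names and upload ID hypothetical):
+        #
+        #     POST /bucket/obj HTTP/1.1
+        #     X-Goog-Resumable: start
+        #
+        #     HTTP/1.1 201 Created
+        #     Location: https://storage.googleapis.com/bucket/obj?upload_id=...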
+ post_headers = {} + for k in headers: + if k.lower() == 'content-length': + raise ResumableUploadException( + 'Attempt to specify Content-Length header (disallowed)', + ResumableTransferDisposition.ABORT) + post_headers[k] = headers[k] + post_headers[conn.provider.resumable_upload_header] = 'start' + + resp = conn.make_request( + 'POST', key.bucket.name, key.name, post_headers) + # Get tracker URI from response 'Location' header. + body = resp.read() + + # Check for various status conditions. + if resp.status in [500, 503]: + # Retry status 500 and 503 errors after a delay. + raise ResumableUploadException( + 'Got status %d from attempt to start resumable upload. ' + 'Will wait/retry' % resp.status, + ResumableTransferDisposition.WAIT_BEFORE_RETRY) + elif resp.status != 200 and resp.status != 201: + raise ResumableUploadException( + 'Got status %d from attempt to start resumable upload. ' + 'Aborting' % resp.status, + ResumableTransferDisposition.ABORT) + + # Else we got 200 or 201 response code, indicating the resumable + # upload was created. + tracker_uri = resp.getheader('Location') + if not tracker_uri: + raise ResumableUploadException( + 'No resumable tracker URI found in resumable initiation ' + 'POST response (%s)' % body, + ResumableTransferDisposition.WAIT_BEFORE_RETRY) + self._set_tracker_uri(tracker_uri) + self._save_tracker_uri_to_file() + + def _upload_file_bytes(self, conn, http_conn, fp, file_length, + total_bytes_uploaded, cb, num_cb, headers): + """ + Makes one attempt to upload file bytes, using an existing resumable + upload connection. + + Returns (etag, generation, metageneration) from server upon success. + + Raises ResumableUploadException if any problems occur. + """ + buf = fp.read(self.BUFFER_SIZE) + if cb: + # The cb_count represents the number of full buffers to send between + # cb executions. + if num_cb > 2: + cb_count = file_length / self.BUFFER_SIZE / (num_cb-2) + elif num_cb < 0: + cb_count = -1 + else: + cb_count = 0 + i = 0 + cb(total_bytes_uploaded, file_length) + + # Build resumable upload headers for the transfer. Don't send a + # Content-Range header if the file is 0 bytes long, because the + # resumable upload protocol uses an *inclusive* end-range (so, sending + # 'bytes 0-0/1' would actually mean you're sending a 1-byte file). + if not headers: + put_headers = {} + else: + put_headers = headers.copy() + if file_length: + if total_bytes_uploaded == file_length: + range_header = self._build_content_range_header( + '*', file_length) + else: + range_header = self._build_content_range_header( + '%d-%d' % (total_bytes_uploaded, file_length - 1), + file_length) + put_headers['Content-Range'] = range_header + # Set Content-Length to the total bytes we'll send with this PUT. + put_headers['Content-Length'] = str(file_length - total_bytes_uploaded) + http_request = AWSAuthConnection.build_base_http_request( + conn, 'PUT', path=self.tracker_uri_path, auth_path=None, + headers=put_headers, host=self.tracker_uri_host) + http_conn.putrequest('PUT', http_request.path) + for k in put_headers: + http_conn.putheader(k, put_headers[k]) + http_conn.endheaders() + + # Turn off debug on http connection so upload content isn't included + # in debug stream. 
+ http_conn.set_debuglevel(0) + while buf: + http_conn.send(buf) + for alg in self.digesters: + self.digesters[alg].update(buf) + total_bytes_uploaded += len(buf) + if cb: + i += 1 + if i == cb_count or cb_count == -1: + cb(total_bytes_uploaded, file_length) + i = 0 + buf = fp.read(self.BUFFER_SIZE) + http_conn.set_debuglevel(conn.debug) + if cb: + cb(total_bytes_uploaded, file_length) + if total_bytes_uploaded != file_length: + # Abort (and delete the tracker file) so if the user retries + # they'll start a new resumable upload rather than potentially + # attempting to pick back up later where we left off. + raise ResumableUploadException( + 'File changed during upload: EOF at %d bytes of %d byte file.' % + (total_bytes_uploaded, file_length), + ResumableTransferDisposition.ABORT) + resp = http_conn.getresponse() + # Restore http connection debug level. + http_conn.set_debuglevel(conn.debug) + + if resp.status == 200: + # Success. + return (resp.getheader('etag'), + resp.getheader('x-goog-generation'), + resp.getheader('x-goog-metageneration')) + # Retry timeout (408) and status 500 and 503 errors after a delay. + elif resp.status in [408, 500, 503]: + disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY + else: + # Catch all for any other error codes. + disposition = ResumableTransferDisposition.ABORT + raise ResumableUploadException('Got response code %d while attempting ' + 'upload (%s)' % + (resp.status, resp.reason), disposition) + + def _attempt_resumable_upload(self, key, fp, file_length, headers, cb, + num_cb): + """ + Attempts a resumable upload. + + Returns (etag, generation, metageneration) from server upon success. + + Raises ResumableUploadException if any problems occur. + """ + (server_start, server_end) = self.SERVER_HAS_NOTHING + conn = key.bucket.connection + if self.tracker_uri: + # Try to resume existing resumable upload. + try: + (server_start, server_end) = ( + self._query_server_pos(conn, file_length)) + self.server_has_bytes = server_start + + if server_end: + # If the server already has some of the content, we need to + # update the digesters with the bytes that have already been + # uploaded to ensure we get a complete hash in the end. + print('Catching up hash digest(s) for resumed upload') + fp.seek(0) + # Read local file's bytes through position server has. For + # example, if server has (0, 3) we want to read 3-0+1=4 bytes. + bytes_to_go = server_end + 1 + while bytes_to_go: + chunk = fp.read(min(key.BufferSize, bytes_to_go)) + if not chunk: + raise ResumableUploadException( + 'Hit end of file during resumable upload hash ' + 'catchup. This should not happen under\n' + 'normal circumstances, as it indicates the ' + 'server has more bytes of this transfer\nthan' + ' the current file size. Restarting upload.', + ResumableTransferDisposition.START_OVER) + for alg in self.digesters: + self.digesters[alg].update(chunk) + bytes_to_go -= len(chunk) + + if conn.debug >= 1: + print('Resuming transfer.') + except ResumableUploadException as e: + if conn.debug >= 1: + print('Unable to resume transfer (%s).' % e.message) + self._start_new_resumable_upload(key, headers) + else: + self._start_new_resumable_upload(key, headers) + + # upload_start_point allows the code that instantiated the + # ResumableUploadHandler to find out the point from which it started + # uploading (e.g., so it can correctly compute throughput). 
+        if self.upload_start_point is None:
+            self.upload_start_point = server_end
+
+        total_bytes_uploaded = server_end + 1
+        # Corner case: Don't attempt to seek if we've already uploaded the
+        # entire file, because if the file is a stream (e.g., the KeyFile
+        # wrapper around input key when copying between providers), attempting
+        # to seek to the end of file would result in an InvalidRange error.
+        if file_length < total_bytes_uploaded:
+            fp.seek(total_bytes_uploaded)
+        conn = key.bucket.connection
+
+        # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
+        # pool connections) because httplib requires a new HTTP connection per
+        # transaction. (Without this, calling http_conn.getresponse() would get
+        # "ResponseNotReady".)
+        http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
+                                             conn.is_secure)
+        http_conn.set_debuglevel(conn.debug)
+
+        # Make sure to close http_conn at end so if a local file read
+        # failure occurs partway through server will terminate current upload
+        # and can report that progress on next attempt.
+        try:
+            return self._upload_file_bytes(conn, http_conn, fp, file_length,
+                                           total_bytes_uploaded, cb, num_cb,
+                                           headers)
+        except (ResumableUploadException, socket.error):
+            resp = self._query_server_state(conn, file_length)
+            if resp.status == 400:
+                raise ResumableUploadException('Got 400 response from server '
+                    'state query after failed resumable upload attempt. This '
+                    'can happen for various reasons, including specifying an '
+                    'invalid request (e.g., an invalid canned ACL) or if the '
+                    'file size changed between upload attempts',
+                    ResumableTransferDisposition.ABORT)
+            else:
+                raise
+        finally:
+            http_conn.close()
+
+    def _check_final_md5(self, key, etag):
+        """
+        Checks that etag from server agrees with md5 computed before upload.
+        This is important, since the upload could have spanned a number of
+        hours and multiple processes (e.g., gsutil runs), and the user could
+        change some of the file and not realize they have inconsistent data.
+        """
+        if key.bucket.connection.debug >= 1:
+            print('Checking md5 against etag.')
+        if key.md5 != etag.strip('"\''):
+            # Call key.open_read() before attempting to delete the
+            # (incorrect-content) key, so we perform that request on a
+            # different HTTP connection. This is needed because httplib
+            # will return a "Response not ready" error if you try to perform
+            # a second transaction on the connection.
+            key.open_read()
+            key.close()
+            key.delete()
+            raise ResumableUploadException(
+                'File changed during upload: md5 signature doesn\'t match etag '
+                '(incorrect uploaded object deleted)',
+                ResumableTransferDisposition.ABORT)
+
+    def handle_resumable_upload_exception(self, e, debug):
+        if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS):
+            if debug >= 1:
+                print('Caught non-retryable ResumableUploadException (%s); '
+                      'aborting but retaining tracker file' % e.message)
+            raise
+        elif (e.disposition == ResumableTransferDisposition.ABORT):
+            if debug >= 1:
+                print('Caught non-retryable ResumableUploadException (%s); '
+                      'aborting and removing tracker file' % e.message)
+            self._remove_tracker_file()
+            raise
+        else:
+            if debug >= 1:
+                print('Caught ResumableUploadException (%s) - will retry' %
+                      e.message)
+
+    def track_progress_less_iterations(self, server_had_bytes_before_attempt,
+                                       roll_back_md5=True, debug=0):
+        # At this point we had a re-tryable failure; see if we made progress.
+ if self.server_has_bytes > server_had_bytes_before_attempt: + self.progress_less_iterations = 0 # If progress, reset counter. + else: + self.progress_less_iterations += 1 + if roll_back_md5: + # Rollback any potential hash updates, as we did not + # make any progress in this iteration. + self.digesters = self.digesters_before_attempt + + if self.progress_less_iterations > self.num_retries: + # Don't retry any longer in the current process. + raise ResumableUploadException( + 'Too many resumable upload attempts failed without ' + 'progress. You might try this upload again later', + ResumableTransferDisposition.ABORT_CUR_PROCESS) + + # Use binary exponential backoff to desynchronize client requests. + sleep_time_secs = random.random() * (2**self.progress_less_iterations) + if debug >= 1: + print('Got retryable failure (%d progress-less in a row).\n' + 'Sleeping %3.1f seconds before re-trying' % + (self.progress_less_iterations, sleep_time_secs)) + time.sleep(sleep_time_secs) + + def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None): + """ + Upload a file to a key into a bucket on GS, using GS resumable upload + protocol. + + :type key: :class:`boto.s3.key.Key` or subclass + :param key: The Key object to which data is to be uploaded + + :type fp: file-like object + :param fp: The file pointer to upload + + :type headers: dict + :param headers: The headers to pass along with the PUT request + + :type cb: function + :param cb: a callback function that will be called to report progress on + the upload. The callback should accept two integer parameters, the + first representing the number of bytes that have been successfully + transmitted to GS, and the second representing the total number of + bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the cb + parameter, this parameter determines the granularity of the callback + by defining the maximum number of times the callback will be called + during the file transfer. Providing a negative integer will cause + your callback to be called with each buffer read. + + :type hash_algs: dictionary + :param hash_algs: (optional) Dictionary mapping hash algorithm + descriptions to corresponding state-ful hashing objects that + implement update(), digest(), and copy() (e.g. hashlib.md5()). + Defaults to {'md5': md5()}. + + Raises ResumableUploadException if a problem occurs during the transfer. + """ + + if not headers: + headers = {} + # If Content-Type header is present and set to None, remove it. + # This is gsutil's way of asking boto to refrain from auto-generating + # that header. + CT = 'Content-Type' + if CT in headers and headers[CT] is None: + del headers[CT] + + headers['User-Agent'] = UserAgent + + # Determine file size different ways for case where fp is actually a + # wrapper around a Key vs an actual file. + if isinstance(fp, KeyFile): + file_length = fp.getkey().size + else: + fp.seek(0, os.SEEK_END) + file_length = fp.tell() + fp.seek(0) + debug = key.bucket.connection.debug + + # Compute the MD5 checksum on the fly. + if hash_algs is None: + hash_algs = {'md5': md5} + self.digesters = dict( + (alg, hash_algs[alg]()) for alg in hash_algs or {}) + + # Use num-retries from constructor if one was provided; else check + # for a value specified in the boto config file; else default to 5. 
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 6)
+        self.progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            server_had_bytes_before_attempt = self.server_has_bytes
+            self.digesters_before_attempt = dict(
+                (alg, self.digesters[alg].copy())
+                for alg in self.digesters)
+            try:
+                # Save generation and metageneration in class state so caller
+                # can find these values, for use in preconditions of future
+                # operations on the uploaded object.
+                (etag, self.generation, self.metageneration) = (
+                    self._attempt_resumable_upload(key, fp, file_length,
+                                                   headers, cb, num_cb))
+
+                # Get the final digests for the uploaded content.
+                for alg in self.digesters:
+                    key.local_hashes[alg] = self.digesters[alg].digest()
+
+                # Upload succeeded, so remove the tracker file (if we have one).
+                self._remove_tracker_file()
+                self._check_final_md5(key, etag)
+                key.generation = self.generation
+                if debug >= 1:
+                    print('Resumable upload complete.')
+                return
+            except self.RETRYABLE_EXCEPTIONS as e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+                if isinstance(e, IOError) and e.errno == errno.EPIPE:
+                    # Broken pipe error causes httplib to immediately
+                    # close the socket (http://bugs.python.org/issue5542),
+                    # so we need to close the connection before we resume
+                    # the upload (which will cause a new connection to be
+                    # opened the next time an HTTP request is sent).
+                    key.bucket.connection.connection.close()
+            except ResumableUploadException as e:
+                self.handle_resumable_upload_exception(e, debug)
+
+            self.track_progress_less_iterations(server_had_bytes_before_attempt,
+                                                True, debug)
diff --git a/ext/boto/gs/user.py b/ext/boto/gs/user.py
new file mode 100644
index 0000000000..c3072952f9
--- /dev/null
+++ b/ext/boto/gs/user.py
@@ -0,0 +1,54 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
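+
+# For illustration, this class round-trips owner/user elements of the form
+# (the ID and name values here are invented):
+#
+#     <Owner><ID>00b4903a97</ID><Name>jane</Name></Owner>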
+
+
+class User(object):
+    def __init__(self, parent=None, id='', name=''):
+        if parent:
+            parent.owner = self
+        self.type = None
+        self.id = id
+        self.name = name
+
+    def __repr__(self):
+        return self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        if self.type:
+            s = '<%s type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        if self.name:
+            s += '<Name>%s</Name>' % self.name
+        s += '</%s>' % element_name
+        return s
diff --git a/ext/boto/handler.py b/ext/boto/handler.py
new file mode 100644
index 0000000000..3b5f073275
--- /dev/null
+++ b/ext/boto/handler.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+
+from boto.compat import StringIO
+
+
+class XmlHandler(xml.sax.ContentHandler):
+
+    def __init__(self, root_node, connection):
+        self.connection = connection
+        self.nodes = [('root', root_node)]
+        self.current_text = ''
+
+    def startElement(self, name, attrs):
+        self.current_text = ''
+        new_node = self.nodes[-1][1].startElement(name, attrs, self.connection)
+        if new_node is not None:
+            self.nodes.append((name, new_node))
+
+    def endElement(self, name):
+        self.nodes[-1][1].endElement(name, self.current_text, self.connection)
+        if self.nodes[-1][0] == name:
+            if hasattr(self.nodes[-1][1], 'endNode'):
+                self.nodes[-1][1].endNode(self.connection)
+            self.nodes.pop()
+        self.current_text = ''
+
+    def characters(self, content):
+        self.current_text += content
+
+
+class XmlHandlerWrapper(object):
+    def __init__(self, root_node, connection):
+        self.handler = XmlHandler(root_node, connection)
+        self.parser = xml.sax.make_parser()
+        self.parser.setContentHandler(self.handler)
+        self.parser.setFeature(xml.sax.handler.feature_external_ges, 0)
+
+    def parseString(self, content):
+        return self.parser.parse(StringIO(content))
diff --git a/ext/boto/https_connection.py b/ext/boto/https_connection.py
new file mode 100644
index 0000000000..ddc31a1522
--- /dev/null
+++ b/ext/boto/https_connection.py
@@ -0,0 +1,138 @@
+# Copyright 2007,2011 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is derived from +# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py + + +"""Extensions to allow HTTPS requests with SSL certificate validation.""" + +import re +import socket +import ssl + +import boto + +from boto.compat import six, http_client + + +class InvalidCertificateException(http_client.HTTPException): + """Raised when a certificate is provided with an invalid hostname.""" + + def __init__(self, host, cert, reason): + """Constructor. + + Args: + host: The hostname the connection was made to. + cert: The SSL certificate (as a dictionary) the host returned. + """ + http_client.HTTPException.__init__(self) + self.host = host + self.cert = cert + self.reason = reason + + def __str__(self): + return ('Host %s returned an invalid certificate (%s): %s' % + (self.host, self.reason, self.cert)) + + +def GetValidHostsForCert(cert): + """Returns a list of valid host globs for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + Returns: + list: A list of valid host globs. + """ + if 'subjectAltName' in cert: + return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] + else: + return [x[0][1] for x in cert['subject'] + if x[0][0].lower() == 'commonname'] + + +def ValidateCertificateHostname(cert, hostname): + """Validates that a given hostname is valid for an SSL certificate. + + Args: + cert: A dictionary representing an SSL certificate. + hostname: The hostname to test. + Returns: + bool: Whether or not the hostname is valid for this certificate. + """ + hosts = GetValidHostsForCert(cert) + boto.log.debug( + "validating server certificate: hostname=%s, certificate hosts=%s", + hostname, hosts) + for host in hosts: + host_re = host.replace('.', '\.').replace('*', '[^.]*') + if re.search('^%s$' % (host_re,), hostname, re.I): + return True + return False + + +class CertValidatingHTTPSConnection(http_client.HTTPConnection): + """An HTTPConnection that connects over SSL and validates certificates.""" + + default_port = http_client.HTTPS_PORT + + def __init__(self, host, port=default_port, key_file=None, cert_file=None, + ca_certs=None, strict=None, **kwargs): + """Constructor. + + Args: + host: The hostname. Can be in 'host:port' form. + port: The port. Defaults to 443. + key_file: A file containing the client's private key + cert_file: A file containing the client's certificates + ca_certs: A file contianing a set of concatenated certificate authority + certs for validating the server against. + strict: When true, causes BadStatusLine to be raised if the status line + can't be parsed as a valid HTTP/1.0 or 1.1 status line. + """ + if six.PY2: + # Python 3.2 and newer have deprecated and removed the strict + # parameter. Since the params are supported as keyword arguments + # we conditionally add it here. + kwargs['strict'] = strict + + http_client.HTTPConnection.__init__(self, host=host, port=port, **kwargs) + self.key_file = key_file + self.cert_file = cert_file + self.ca_certs = ca_certs + + def connect(self): + "Connect to a host on a given (SSL) port." 
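+        # In outline: open a TCP connection, wrap it in SSL with
+        # CERT_REQUIRED so the server must present a certificate that chains
+        # to ca_certs (when given), then reject the connection if the
+        # certificate does not cover the requested hostname.
+        #
+        # A hypothetical usage sketch (the CA bundle path is invented):
+        #
+        #     conn = CertValidatingHTTPSConnection(
+        #         'www.example.com', 443, ca_certs='/etc/ssl/certs/ca.pem')
+        #     conn.request('GET', '/')
+        #     resp = conn.getresponse()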
+ if hasattr(self, "timeout"): + sock = socket.create_connection((self.host, self.port), self.timeout) + else: + sock = socket.create_connection((self.host, self.port)) + msg = "wrapping ssl socket; " + if self.ca_certs: + msg += "CA certificate file=%s" % self.ca_certs + else: + msg += "using system provided SSL certs" + boto.log.debug(msg) + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, + certfile=self.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=self.ca_certs) + cert = self.sock.getpeercert() + hostname = self.host.split(':', 0)[0] + if not ValidateCertificateHostname(cert, hostname): + raise InvalidCertificateException(hostname, + cert, + 'remote hostname "%s" does not match ' + 'certificate' % hostname) diff --git a/ext/boto/iam/__init__.py b/ext/boto/iam/__init__.py new file mode 100644 index 0000000000..cd45d7ad16 --- /dev/null +++ b/ext/boto/iam/__init__.py @@ -0,0 +1,93 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# this is here for backward compatibility +# originally, the IAMConnection class was defined here +from boto.iam.connection import IAMConnection +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +class IAMRegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an connection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the connection + class's constructor as keyword arguments and they will be + passed along to the connection object. + + :rtype: Connection object + :return: The connection to this regions endpoint + """ + if self.connection_cls: + return self.connection_cls(host=self.endpoint, **kw_params) + + +def regions(): + """ + Get all available regions for the IAM service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + regions = get_regions( + 'iam', + region_cls=IAMRegionInfo, + connection_cls=IAMConnection + ) + + # For historical reasons, we had a "universal" endpoint as well. + regions.append( + IAMRegionInfo( + name='universal', + endpoint='iam.amazonaws.com', + connection_cls=IAMConnection + ) + ) + + return regions + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.iam.connection.IAMConnection`. 
+ + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.iam.connection.IAMConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + if region_name == 'universal': + region = IAMRegionInfo( + name='universal', + endpoint='iam.amazonaws.com', + connection_cls=IAMConnection + ) + return region.connect(**kw_params) + + return connect('iam', region_name, region_cls=IAMRegionInfo, + connection_cls=IAMConnection, **kw_params) diff --git a/ext/boto/iam/connection.py b/ext/boto/iam/connection.py new file mode 100644 index 0000000000..f66931f460 --- /dev/null +++ b/ext/boto/iam/connection.py @@ -0,0 +1,1932 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +import boto.jsonresponse +from boto.compat import json, six +from boto.resultset import ResultSet +from boto.iam.summarymap import SummaryMap +from boto.connection import AWSQueryConnection + +DEFAULT_POLICY_DOCUMENTS = { + 'default': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, + 'amazonaws.com.cn': { + 'Statement': [ + { + 'Principal': { + 'Service': ['ec2.amazonaws.com.cn'] + }, + 'Effect': 'Allow', + 'Action': ['sts:AssumeRole'] + } + ] + }, +} +# For backward-compatibility, we'll preserve this here. +ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default']) + + +class IAMConnection(AWSQueryConnection): + + APIVersion = '2010-05-08' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host='iam.amazonaws.com', + debug=0, https_connection_factory=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + super(IAMConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, + path, security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_response(self, action, params, path='/', parent=None, + verb='POST', list_marker='Set'): + """ + Utility method to handle calls to IAM and parsing of responses. 
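+
+        For illustration, list-style calls elsewhere in this class look
+        like::
+
+            self.get_response('ListGroups', params, list_marker='Groups')
+
+        On a 200 response with a body this returns a parsed
+        boto.jsonresponse.Element (attribute names pythonized); an empty
+        body yields an empty dict.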
+ """ + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + if body: + e = boto.jsonresponse.Element(list_marker=list_marker, + pythonize_name=True) + h = boto.jsonresponse.XmlHandler(e, parent) + h.parse(body) + return e + else: + # Support empty responses, e.g. deleting a SAML provider + # according to the official documentation. + return {} + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + # + # Group methods + # + + def get_all_groups(self, path_prefix='/', marker=None, max_items=None): + """ + List the groups that have the specified path prefix. + + :type path_prefix: string + :param path_prefix: If provided, only groups whose paths match + the provided prefix will be returned. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {} + if path_prefix: + params['PathPrefix'] = path_prefix + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListGroups', params, + list_marker='Groups') + + def get_group(self, group_name, marker=None, max_items=None): + """ + Return a list of users that are in the specified group. + + :type group_name: string + :param group_name: The name of the group whose information should + be returned. + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + """ + params = {'GroupName': group_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('GetGroup', params, list_marker='Users') + + def create_group(self, group_name, path='/'): + """ + Create a group. + + :type group_name: string + :param group_name: The name of the new group + + :type path: string + :param path: The path to the group (Optional). Defaults to /. + + """ + params = {'GroupName': group_name, + 'Path': path} + return self.get_response('CreateGroup', params) + + def delete_group(self, group_name): + """ + Delete a group. The group must not contain any Users or + have any attached policies + + :type group_name: string + :param group_name: The name of the group to delete. + + """ + params = {'GroupName': group_name} + return self.get_response('DeleteGroup', params) + + def update_group(self, group_name, new_group_name=None, new_path=None): + """ + Updates name and/or path of the specified group. + + :type group_name: string + :param group_name: The name of the new group + + :type new_group_name: string + :param new_group_name: If provided, the name of the group will be + changed to this name. + + :type new_path: string + :param new_path: If provided, the path of the group will be + changed to this path. 
+
+        """
+        params = {'GroupName': group_name}
+        if new_group_name:
+            params['NewGroupName'] = new_group_name
+        if new_path:
+            params['NewPath'] = new_path
+        return self.get_response('UpdateGroup', params)
+
+    def add_user_to_group(self, group_name, user_name):
+        """
+        Add a user to a group
+
+        :type group_name: string
+        :param group_name: The name of the group
+
+        :type user_name: string
+        :param user_name: The user to be added to the group.
+
+        """
+        params = {'GroupName': group_name,
+                  'UserName': user_name}
+        return self.get_response('AddUserToGroup', params)
+
+    def remove_user_from_group(self, group_name, user_name):
+        """
+        Remove a user from a group.
+
+        :type group_name: string
+        :param group_name: The name of the group
+
+        :type user_name: string
+        :param user_name: The user to remove from the group.
+
+        """
+        params = {'GroupName': group_name,
+                  'UserName': user_name}
+        return self.get_response('RemoveUserFromGroup', params)
+
+    def put_group_policy(self, group_name, policy_name, policy_json):
+        """
+        Adds or updates the specified policy document for the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document.
+
+        :type policy_json: string
+        :param policy_json: The policy document.
+
+        """
+        params = {'GroupName': group_name,
+                  'PolicyName': policy_name,
+                  'PolicyDocument': policy_json}
+        return self.get_response('PutGroupPolicy', params, verb='POST')
+
+    def get_all_group_policies(self, group_name, marker=None, max_items=None):
+        """
+        List the names of the policies associated with the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group the policy is associated with.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of policies you want in the response.
+        """
+        params = {'GroupName': group_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListGroupPolicies', params,
+                                 list_marker='PolicyNames')
+
+    def get_group_policy(self, group_name, policy_name):
+        """
+        Retrieves the specified policy document for the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document to get.
+
+        """
+        params = {'GroupName': group_name,
+                  'PolicyName': policy_name}
+        return self.get_response('GetGroupPolicy', params, verb='POST')
+
+    def delete_group_policy(self, group_name, policy_name):
+        """
+        Deletes the specified policy document for the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document to delete.
+
+        """
+        params = {'GroupName': group_name,
+                  'PolicyName': policy_name}
+        return self.get_response('DeleteGroupPolicy', params, verb='POST')
+
+    def get_all_users(self, path_prefix='/', marker=None, max_items=None):
+        """
+        List the users that have the specified path prefix.
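The inline-policy helpers above (put_group_policy and friends) expect the policy document as an already-serialized JSON string, not a dict, so a plain json.dumps of the statement structure is the usual pattern. A minimal sketch with placeholder group and policy names:

    import json

    policy = {
        'Statement': [{
            'Effect': 'Allow',
            'Action': ['s3:ListBucket'],
            'Resource': ['arn:aws:s3:::example-bucket'],
        }]
    }

    # Inline policies are keyed by (group name, policy name); calling
    # put_group_policy again with the same pair overwrites the document.
    conn.put_group_policy('developers', 'allow-bucket-listing',
                          json.dumps(policy))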
+
+        :type path_prefix: string
+        :param path_prefix: If provided, only users whose paths match
+            the provided prefix will be returned.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of users you want in the response.
+        """
+        params = {'PathPrefix': path_prefix}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListUsers', params, list_marker='Users')
+
+    #
+    # User methods
+    #
+
+    def create_user(self, user_name, path='/'):
+        """
+        Create a user.
+
+        :type user_name: string
+        :param user_name: The name of the new user
+
+        :type path: string
+        :param path: The path in which the user will be created.
+            Defaults to /.
+
+        """
+        params = {'UserName': user_name,
+                  'Path': path}
+        return self.get_response('CreateUser', params)
+
+    def delete_user(self, user_name):
+        """
+        Delete a user including the user's path, GUID and ARN.
+
+        If the user_name is not specified, the user_name is determined
+        implicitly based on the AWS Access Key ID used to sign the request.
+
+        :type user_name: string
+        :param user_name: The name of the user to delete.
+
+        """
+        params = {'UserName': user_name}
+        return self.get_response('DeleteUser', params)
+
+    def get_user(self, user_name=None):
+        """
+        Retrieve information about the specified user.
+
+        If the user_name is not specified, the user_name is determined
+        implicitly based on the AWS Access Key ID used to sign the request.
+
+        :type user_name: string
+        :param user_name: The name of the user to retrieve.
+            If not specified, defaults to user making request.
+        """
+        params = {}
+        if user_name:
+            params['UserName'] = user_name
+        return self.get_response('GetUser', params)
+
+    def update_user(self, user_name, new_user_name=None, new_path=None):
+        """
+        Updates name and/or path of the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user
+
+        :type new_user_name: string
+        :param new_user_name: If provided, the username of the user will be
+            changed to this username.
+
+        :type new_path: string
+        :param new_path: If provided, the path of the user will be
+            changed to this path.
+
+        """
+        params = {'UserName': user_name}
+        if new_user_name:
+            params['NewUserName'] = new_user_name
+        if new_path:
+            params['NewPath'] = new_path
+        return self.get_response('UpdateUser', params)
+
+    def get_all_user_policies(self, user_name, marker=None, max_items=None):
+        """
+        List the names of the policies associated with the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user the policy is associated with.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of policies you want in the response.
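The marker/max_items pair that keeps recurring in these docstrings is IAM's standard truncated-pagination protocol: the response carries IsTruncated and Marker fields to feed back into the next call. A hedged sketch of draining ListUsers page by page; the response key names mirror the wire format and should be verified against a live response:

    def iter_all_users(conn, page_size=100):
        marker = None
        while True:
            response = conn.get_all_users(marker=marker, max_items=page_size)
            result = response.list_users_response.list_users_result
            for user in result.users:
                yield user
            # is_truncated comes back as the string 'true'/'false' from
            # the XML, not as a Python bool.
            if result.is_truncated != 'true':
                break
            marker = result.marker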
+        """
+        params = {'UserName': user_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListUserPolicies', params,
+                                 list_marker='PolicyNames')
+
+    def put_user_policy(self, user_name, policy_name, policy_json):
+        """
+        Adds or updates the specified policy document for the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document.
+
+        :type policy_json: string
+        :param policy_json: The policy document.
+
+        """
+        params = {'UserName': user_name,
+                  'PolicyName': policy_name,
+                  'PolicyDocument': policy_json}
+        return self.get_response('PutUserPolicy', params, verb='POST')
+
+    def get_user_policy(self, user_name, policy_name):
+        """
+        Retrieves the specified policy document for the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document to get.
+
+        """
+        params = {'UserName': user_name,
+                  'PolicyName': policy_name}
+        return self.get_response('GetUserPolicy', params, verb='POST')
+
+    def delete_user_policy(self, user_name, policy_name):
+        """
+        Deletes the specified policy document for the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user the policy is associated with.
+
+        :type policy_name: string
+        :param policy_name: The name of the policy document to delete.
+
+        """
+        params = {'UserName': user_name,
+                  'PolicyName': policy_name}
+        return self.get_response('DeleteUserPolicy', params, verb='POST')
+
+    def get_groups_for_user(self, user_name, marker=None, max_items=None):
+        """
+        List the groups that a specified user belongs to.
+
+        :type user_name: string
+        :param user_name: The name of the user to list groups for.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of groups you want in the response.
+        """
+        params = {'UserName': user_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListGroupsForUser', params,
+                                 list_marker='Groups')
+
+    #
+    # Access Keys
+    #
+
+    def get_all_access_keys(self, user_name, marker=None, max_items=None):
+        """
+        Get all access keys associated with an account.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only
+            in a follow-up request after you've received a response
+            where the results are truncated. Set this to the value of
+            the Marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of keys you want in the response.
+        """
+        params = {'UserName': user_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListAccessKeys', params,
+                                 list_marker='AccessKeyMetadata')
+
+    def create_access_key(self, user_name=None):
+        """
+        Create a new AWS Secret Access Key and corresponding AWS Access Key ID
+        for the specified user.
The default status for new keys is Active + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {'UserName': user_name} + return self.get_response('CreateAccessKey', params) + + def update_access_key(self, access_key_id, status, user_name=None): + """ + Changes the status of the specified access key from Active to Inactive + or vice versa. This action can be used to disable a user's key as + part of a key rotation workflow. + + If the user_name is not specified, the user_name is determined + implicitly based on the AWS Access Key ID used to sign the request. + + :type access_key_id: string + :param access_key_id: The ID of the access key. + + :type status: string + :param status: Either Active or Inactive. + + :type user_name: string + :param user_name: The username of user (optional). + + """ + params = {'AccessKeyId': access_key_id, + 'Status': status} + if user_name: + params['UserName'] = user_name + return self.get_response('UpdateAccessKey', params) + + def delete_access_key(self, access_key_id, user_name=None): + """ + Delete an access key associated with a user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type access_key_id: string + :param access_key_id: The ID of the access key to be deleted. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {'AccessKeyId': access_key_id} + if user_name: + params['UserName'] = user_name + return self.get_response('DeleteAccessKey', params) + + # + # Signing Certificates + # + + def get_all_signing_certs(self, marker=None, max_items=None, + user_name=None): + """ + Get all signing certificates associated with an account. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + if user_name: + params['UserName'] = user_name + return self.get_response('ListSigningCertificates', + params, list_marker='Certificates') + + def update_signing_cert(self, cert_id, status, user_name=None): + """ + Change the status of the specified signing certificate from + Active to Inactive or vice versa. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type cert_id: string + :param cert_id: The ID of the signing certificate + + :type status: string + :param status: Either Active or Inactive. 
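The update_access_key / delete_access_key pair above is the hinge of the key-rotation workflow the docstring mentions: mint a replacement key, park the old one as Inactive, and delete it once nothing signs with it anymore. A hedged sketch of that sequence:

    def rotate_access_key(conn, user_name, old_key_id):
        # 1. Mint the replacement first; IAM allows two keys per user,
        #    so this works before the old key is removed.
        created = conn.create_access_key(user_name)

        # 2. Park the old key as Inactive rather than deleting it
        #    outright; it can be re-enabled if a forgotten consumer breaks.
        conn.update_access_key(old_key_id, 'Inactive', user_name)

        # 3. Only delete once the new credentials are confirmed working.
        conn.delete_access_key(old_key_id, user_name)
        return created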
+ + :type user_name: string + :param user_name: The username of the user + """ + params = {'CertificateId': cert_id, + 'Status': status} + if user_name: + params['UserName'] = user_name + return self.get_response('UpdateSigningCertificate', params) + + def upload_signing_cert(self, cert_body, user_name=None): + """ + Uploads an X.509 signing certificate and associates it with + the specified user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type cert_body: string + :param cert_body: The body of the signing certificate. + + :type user_name: string + :param user_name: The username of the user + + """ + params = {'CertificateBody': cert_body} + if user_name: + params['UserName'] = user_name + return self.get_response('UploadSigningCertificate', params, + verb='POST') + + def delete_signing_cert(self, cert_id, user_name=None): + """ + Delete a signing certificate associated with a user. + + If the user_name is not specified, it is determined implicitly based + on the AWS Access Key ID used to sign the request. + + :type user_name: string + :param user_name: The username of the user + + :type cert_id: string + :param cert_id: The ID of the certificate. + + """ + params = {'CertificateId': cert_id} + if user_name: + params['UserName'] = user_name + return self.get_response('DeleteSigningCertificate', params) + + # + # Server Certificates + # + + def list_server_certs(self, path_prefix='/', + marker=None, max_items=None): + """ + Lists the server certificates that have the specified path prefix. + If none exist, the action returns an empty list. + + :type path_prefix: string + :param path_prefix: If provided, only certificates whose paths match + the provided prefix will be returned. + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + + """ + params = {} + if path_prefix: + params['PathPrefix'] = path_prefix + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListServerCertificates', + params, + list_marker='ServerCertificateMetadataList') + + # Preserves backwards compatibility. + # TODO: Look into deprecating this eventually? + get_all_server_certs = list_server_certs + + def update_server_cert(self, cert_name, new_cert_name=None, + new_path=None): + """ + Updates the name and/or the path of the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate that you want + to update. + + :type new_cert_name: string + :param new_cert_name: The new name for the server certificate. + Include this only if you are updating the + server certificate's name. + + :type new_path: string + :param new_path: If provided, the path of the certificate will be + changed to this path. 
+ """ + params = {'ServerCertificateName': cert_name} + if new_cert_name: + params['NewServerCertificateName'] = new_cert_name + if new_path: + params['NewPath'] = new_path + return self.get_response('UpdateServerCertificate', params) + + def upload_server_cert(self, cert_name, cert_body, private_key, + cert_chain=None, path=None): + """ + Uploads a server certificate entity for the AWS Account. + The server certificate entity includes a public key certificate, + a private key, and an optional certificate chain, which should + all be PEM-encoded. + + :type cert_name: string + :param cert_name: The name for the server certificate. Do not + include the path in this value. + + :type cert_body: string + :param cert_body: The contents of the public key certificate + in PEM-encoded format. + + :type private_key: string + :param private_key: The contents of the private key in + PEM-encoded format. + + :type cert_chain: string + :param cert_chain: The contents of the certificate chain. This + is typically a concatenation of the PEM-encoded + public key certificates of the chain. + + :type path: string + :param path: The path for the server certificate. + """ + params = {'ServerCertificateName': cert_name, + 'CertificateBody': cert_body, + 'PrivateKey': private_key} + if cert_chain: + params['CertificateChain'] = cert_chain + if path: + params['Path'] = path + return self.get_response('UploadServerCertificate', params, + verb='POST') + + def get_server_certificate(self, cert_name): + """ + Retrieves information about the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate you want + to retrieve information about. + + """ + params = {'ServerCertificateName': cert_name} + return self.get_response('GetServerCertificate', params) + + def delete_server_cert(self, cert_name): + """ + Delete the specified server certificate. + + :type cert_name: string + :param cert_name: The name of the server certificate you want + to delete. + + """ + params = {'ServerCertificateName': cert_name} + return self.get_response('DeleteServerCertificate', params) + + # + # MFA Devices + # + + def get_all_mfa_devices(self, user_name, marker=None, max_items=None): + """ + Get all MFA devices associated with an account. + + :type user_name: string + :param user_name: The username of the user + + :type marker: string + :param marker: Use this only when paginating results and only + in follow-up request after you've received a response + where the results are truncated. Set this to the value of + the Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this only when paginating results to indicate + the maximum number of groups you want in the response. + + """ + params = {'UserName': user_name} + if marker: + params['Marker'] = marker + if max_items: + params['MaxItems'] = max_items + return self.get_response('ListMFADevices', + params, list_marker='MFADevices') + + def enable_mfa_device(self, user_name, serial_number, + auth_code_1, auth_code_2): + """ + Enables the specified MFA device and associates it with the + specified user. + + :type user_name: string + :param user_name: The username of the user + + :type serial_number: string + :param serial_number: The serial number which uniquely identifies + the MFA device. + + :type auth_code_1: string + :param auth_code_1: An authentication code emitted by the device. + + :type auth_code_2: string + :param auth_code_2: A subsequent authentication code emitted + by the device. 
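Enabling a device requires two consecutive codes so IAM can verify the device's counter or clock against its seed. A rough usage sketch; the serial-number format shown is the ARN style returned by create_virtual_mfa_device further below, and is a placeholder:

    # Two consecutive TOTP codes prove the device is in sync.
    conn.enable_mfa_device(
        'alice',
        'arn:aws:iam::123456789012:mfa/alice',  # hypothetical serial/ARN
        '123456',
        '654321',
    )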
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number,
+                  'AuthenticationCode1': auth_code_1,
+                  'AuthenticationCode2': auth_code_2}
+        return self.get_response('EnableMFADevice', params)
+
+    def deactivate_mfa_device(self, user_name, serial_number):
+        """
+        Deactivates the specified MFA device and removes it from
+        association with the user.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type serial_number: string
+        :param serial_number: The serial number which uniquely identifies
+            the MFA device.
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number}
+        return self.get_response('DeactivateMFADevice', params)
+
+    def resync_mfa_device(self, user_name, serial_number,
+                          auth_code_1, auth_code_2):
+        """
+        Synchronizes the specified MFA device with the AWS servers.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        :type serial_number: string
+        :param serial_number: The serial number which uniquely identifies
+            the MFA device.
+
+        :type auth_code_1: string
+        :param auth_code_1: An authentication code emitted by the device.
+
+        :type auth_code_2: string
+        :param auth_code_2: A subsequent authentication code emitted
+            by the device.
+
+        """
+        params = {'UserName': user_name,
+                  'SerialNumber': serial_number,
+                  'AuthenticationCode1': auth_code_1,
+                  'AuthenticationCode2': auth_code_2}
+        return self.get_response('ResyncMFADevice', params)
+
+    #
+    # Login Profiles
+    #
+
+    def get_login_profiles(self, user_name):
+        """
+        Retrieves the login profile for the specified user.
+
+        :type user_name: string
+        :param user_name: The username of the user
+
+        """
+        params = {'UserName': user_name}
+        return self.get_response('GetLoginProfile', params)
+
+    def create_login_profile(self, user_name, password):
+        """
+        Creates a login profile for the specified user, giving the user the
+        ability to access AWS services and the AWS Management Console.
+
+        :type user_name: string
+        :param user_name: The name of the user
+
+        :type password: string
+        :param password: The new password for the user
+
+        """
+        params = {'UserName': user_name,
+                  'Password': password}
+        return self.get_response('CreateLoginProfile', params)
+
+    def delete_login_profile(self, user_name):
+        """
+        Deletes the login profile associated with the specified user.
+
+        :type user_name: string
+        :param user_name: The name of the user whose login profile
+            will be deleted.
+
+        """
+        params = {'UserName': user_name}
+        return self.get_response('DeleteLoginProfile', params)
+
+    def update_login_profile(self, user_name, password):
+        """
+        Resets the password associated with the user's login profile.
+
+        :type user_name: string
+        :param user_name: The name of the user
+
+        :type password: string
+        :param password: The new password for the user
+
+        """
+        params = {'UserName': user_name,
+                  'Password': password}
+        return self.get_response('UpdateLoginProfile', params)
+
+    def create_account_alias(self, alias):
+        """
+        Creates a new alias for the AWS account.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+
+        :type alias: string
+        :param alias: The alias to attach to the account.
+        """
+        params = {'AccountAlias': alias}
+        return self.get_response('CreateAccountAlias', params)
+
+    def delete_account_alias(self, alias):
+        """
+        Deletes an alias for the AWS account.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+
+        :type alias: string
+        :param alias: The alias to remove from the account.
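The alias methods pair with get_signin_url just below: the console sign-in URL is built from the first (and effectively only) account alias. A brief sketch, with a placeholder alias:

    conn.create_account_alias('example-corp')

    # Resolves to something like
    # https://example-corp.signin.aws.amazon.com/console/ec2
    print(conn.get_signin_url(service='ec2'))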
+        """
+        params = {'AccountAlias': alias}
+        return self.get_response('DeleteAccountAlias', params)
+
+    def get_account_alias(self):
+        """
+        Get the alias for the current account.
+
+        This is referred to in the docs as list_account_aliases,
+        but it seems you can only have one account alias currently.
+
+        For more information on account id aliases, please see
+        http://goo.gl/ToB7G
+        """
+        return self.get_response('ListAccountAliases', {},
+                                 list_marker='AccountAliases')
+
+    def get_signin_url(self, service='ec2'):
+        """
+        Get the URL where IAM users can use their login profile to sign in
+        to this account's console.
+
+        :type service: string
+        :param service: Default service to go to in the console.
+        """
+        alias = self.get_account_alias()
+
+        if not alias:
+            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
+
+        resp = alias.get('list_account_aliases_response', {})
+        result = resp.get('list_account_aliases_result', {})
+        aliases = result.get('account_aliases', [])
+
+        if not len(aliases):
+            raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
+
+        # We'll just use the first one we find.
+        alias = aliases[0]
+
+        if self.host == 'iam.us-gov.amazonaws.com':
+            return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
+                alias,
+                service
+            )
+        elif self.host.endswith('amazonaws.com.cn'):
+            return "https://%s.signin.amazonaws.cn/console/%s" % (
+                alias,
+                service
+            )
+        else:
+            return "https://%s.signin.aws.amazon.com/console/%s" % (
+                alias,
+                service
+            )
+
+    def get_account_summary(self):
+        """
+        Get a summary of account-level IAM entity usage and quotas,
+        e.g. the number of users, groups and roles in the account.
+
+        Returns the result as a :class:`boto.iam.summarymap.SummaryMap`.
+        """
+        return self.get_object('GetAccountSummary', {}, SummaryMap)
+
+    #
+    # IAM Roles
+    #
+
+    def add_role_to_instance_profile(self, instance_profile_name, role_name):
+        """
+        Adds the specified role to the specified instance profile.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to update.
+
+        :type role_name: string
+        :param role_name: Name of the role to add.
+        """
+        return self.get_response('AddRoleToInstanceProfile',
+                                 {'InstanceProfileName': instance_profile_name,
+                                  'RoleName': role_name})
+
+    def create_instance_profile(self, instance_profile_name, path=None):
+        """
+        Creates a new instance profile.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to create.
+
+        :type path: string
+        :param path: The path to the instance profile.
+        """
+        params = {'InstanceProfileName': instance_profile_name}
+        if path is not None:
+            params['Path'] = path
+        return self.get_response('CreateInstanceProfile', params)
+
+    def _build_policy(self, assume_role_policy_document=None):
+        if assume_role_policy_document is not None:
+            if isinstance(assume_role_policy_document, six.string_types):
+                # Historically, they had to pass a string. If it's a string,
+                # assume the user has already handled it.
+                return assume_role_policy_document
+        else:
+
+            for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
+                if tld == 'default':
+                    # Skip the default. We'll fall back to it if we don't find
+                    # anything.
+ continue + + if self.host and self.host.endswith(tld): + assume_role_policy_document = policy + break + + if not assume_role_policy_document: + assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default'] + + # Dump the policy (either user-supplied ``dict`` or one of the defaults) + return json.dumps(assume_role_policy_document) + + def create_role(self, role_name, assume_role_policy_document=None, path=None): + """ + Creates a new role for your AWS account. + + The policy grants permission to an EC2 instance to assume the role. + The policy is URL-encoded according to RFC 3986. Currently, only EC2 + instances can assume roles. + + :type role_name: string + :param role_name: Name of the role to create. + + :type assume_role_policy_document: ``string`` or ``dict`` + :param assume_role_policy_document: The policy that grants an entity + permission to assume the role. + + :type path: string + :param path: The path to the role. + """ + params = { + 'RoleName': role_name, + 'AssumeRolePolicyDocument': self._build_policy( + assume_role_policy_document + ), + } + if path is not None: + params['Path'] = path + return self.get_response('CreateRole', params) + + def delete_instance_profile(self, instance_profile_name): + """ + Deletes the specified instance profile. The instance profile must not + have an associated role. + + :type instance_profile_name: string + :param instance_profile_name: Name of the instance profile to delete. + """ + return self.get_response( + 'DeleteInstanceProfile', + {'InstanceProfileName': instance_profile_name}) + + def delete_role(self, role_name): + """ + Deletes the specified role. The role must not have any policies + attached. + + :type role_name: string + :param role_name: Name of the role to delete. + """ + return self.get_response('DeleteRole', {'RoleName': role_name}) + + def delete_role_policy(self, role_name, policy_name): + """ + Deletes the specified policy associated with the specified role. + + :type role_name: string + :param role_name: Name of the role associated with the policy. + + :type policy_name: string + :param policy_name: Name of the policy to delete. + """ + return self.get_response( + 'DeleteRolePolicy', + {'RoleName': role_name, 'PolicyName': policy_name}) + + def get_instance_profile(self, instance_profile_name): + """ + Retrieves information about the specified instance profile, including + the instance profile's path, GUID, ARN, and role. + + :type instance_profile_name: string + :param instance_profile_name: Name of the instance profile to get + information about. + """ + return self.get_response('GetInstanceProfile', + {'InstanceProfileName': instance_profile_name}) + + def get_role(self, role_name): + """ + Retrieves information about the specified role, including the role's + path, GUID, ARN, and the policy granting permission to EC2 to assume + the role. + + :type role_name: string + :param role_name: Name of the role associated with the policy. + """ + return self.get_response('GetRole', {'RoleName': role_name}) + + def get_role_policy(self, role_name, policy_name): + """ + Retrieves the specified policy document for the specified role. + + :type role_name: string + :param role_name: Name of the role associated with the policy. + + :type policy_name: string + :param policy_name: Name of the policy to get. 
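Thanks to _build_policy, create_role accepts three spellings of the trust policy: nothing at all (falling back to the EC2 trust documents defined at the top of the module, with the China variant selected from the endpoint's TLD), a dict, or a pre-serialized JSON string. A sketch of all three, with placeholder role names:

    # 1. Default trust policy: EC2 may assume the role.
    conn.create_role('app-server')

    # 2. A dict is serialized by _build_policy via json.dumps.
    conn.create_role('app-worker', assume_role_policy_document={
        'Statement': [{
            'Principal': {'Service': ['ec2.amazonaws.com']},
            'Effect': 'Allow',
            'Action': ['sts:AssumeRole'],
        }]
    })

    # 3. A string passes through untouched.
    conn.create_role('app-raw',
                     assume_role_policy_document=ASSUME_ROLE_POLICY_DOCUMENT)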
+ """ + return self.get_response('GetRolePolicy', + {'RoleName': role_name, + 'PolicyName': policy_name}) + + def list_instance_profiles(self, path_prefix=None, marker=None, + max_items=None): + """ + Lists the instance profiles that have the specified path prefix. If + there are none, the action returns an empty list. + + :type path_prefix: string + :param path_prefix: The path prefix for filtering the results. For + example: /application_abc/component_xyz/, which would get all + instance profiles whose path starts with + /application_abc/component_xyz/. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + + return self.get_response('ListInstanceProfiles', params, + list_marker='InstanceProfiles') + + def list_instance_profiles_for_role(self, role_name, marker=None, + max_items=None): + """ + Lists the instance profiles that have the specified associated role. If + there are none, the action returns an empty list. + + :type role_name: string + :param role_name: The name of the role to list instance profiles for. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + Marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {'RoleName': role_name} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response('ListInstanceProfilesForRole', params, + list_marker='InstanceProfiles') + + def list_role_policies(self, role_name, marker=None, max_items=None): + """ + Lists the names of the policies associated with the specified role. If + there are none, the action returns an empty list. + + :type role_name: string + :param role_name: The name of the role to list policies for. + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response + where the results are truncated. Set it to the value of the + marker element in the response you just received. + + :type max_items: int + :param max_items: Use this parameter only when paginating results to + indicate the maximum number of user names you want in the response. + """ + params = {'RoleName': role_name} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response('ListRolePolicies', params, + list_marker='PolicyNames') + + def list_roles(self, path_prefix=None, marker=None, max_items=None): + """ + Lists the roles that have the specified path prefix. If there are none, + the action returns an empty list. 
+
+        :type path_prefix: string
+        :param path_prefix: The path prefix for filtering the results.
+
+        :type marker: string
+        :param marker: Use this parameter only when paginating results, and
+            only in a subsequent request after you've received a response
+            where the results are truncated. Set it to the value of the
+            marker element in the response you just received.
+
+        :type max_items: int
+        :param max_items: Use this parameter only when paginating results to
+            indicate the maximum number of roles you want in the response.
+        """
+        params = {}
+        if path_prefix is not None:
+            params['PathPrefix'] = path_prefix
+        if marker is not None:
+            params['Marker'] = marker
+        if max_items is not None:
+            params['MaxItems'] = max_items
+        return self.get_response('ListRoles', params, list_marker='Roles')
+
+    def put_role_policy(self, role_name, policy_name, policy_document):
+        """
+        Adds (or updates) a policy document associated with the specified role.
+
+        :type role_name: string
+        :param role_name: Name of the role to associate the policy with.
+
+        :type policy_name: string
+        :param policy_name: Name of the policy document.
+
+        :type policy_document: string
+        :param policy_document: The policy document.
+        """
+        return self.get_response('PutRolePolicy',
+                                 {'RoleName': role_name,
+                                  'PolicyName': policy_name,
+                                  'PolicyDocument': policy_document})
+
+    def remove_role_from_instance_profile(self, instance_profile_name,
+                                          role_name):
+        """
+        Removes the specified role from the specified instance profile.
+
+        :type instance_profile_name: string
+        :param instance_profile_name: Name of the instance profile to update.
+
+        :type role_name: string
+        :param role_name: Name of the role to remove.
+        """
+        return self.get_response('RemoveRoleFromInstanceProfile',
+                                 {'InstanceProfileName': instance_profile_name,
+                                  'RoleName': role_name})
+
+    def update_assume_role_policy(self, role_name, policy_document):
+        """
+        Updates the policy that grants an entity permission to assume a role.
+        Currently, only an Amazon EC2 instance can assume a role.
+
+        :type role_name: string
+        :param role_name: Name of the role to update.
+
+        :type policy_document: string
+        :param policy_document: The policy that grants an entity permission to
+            assume the role.
+        """
+        return self.get_response('UpdateAssumeRolePolicy',
+                                 {'RoleName': role_name,
+                                  'PolicyDocument': policy_document})
+
+    def create_saml_provider(self, saml_metadata_document, name):
+        """
+        Creates an IAM entity to describe an identity provider (IdP)
+        that supports SAML 2.0.
+
+        The SAML provider that you create with this operation can be
+        used as a principal in a role's trust policy to establish a
+        trust relationship between AWS and a SAML identity provider.
+        You can create an IAM role that supports Web-based single
+        sign-on (SSO) to the AWS Management Console or one that
+        supports API access to AWS.
+
+        When you create the SAML provider, you upload a SAML
+        metadata document that you get from your IdP and that includes
+        the issuer's name, expiration information, and keys that can
+        be used to validate the SAML authentication response
+        (assertions) that are received from the IdP. You must generate
+        the metadata document using the identity management software
+        that is used as your organization's IdP.
+        This operation requires `Signature Version 4`_.
+        For more information, see `Giving Console Access Using SAML`_
+        and `Creating Temporary Security Credentials for SAML
+        Federation`_ in the Using Temporary Credentials guide.
+ + :type saml_metadata_document: string + :param saml_metadata_document: An XML document generated by an identity + provider (IdP) that supports SAML 2.0. The document includes the + issuer's name, expiration information, and keys that can be used to + validate the SAML authentication response (assertions) that are + received from the IdP. You must generate the metadata document + using the identity management software that is used as your + organization's IdP. + For more information, see `Creating Temporary Security Credentials for + SAML Federation`_ in the Using Temporary Security Credentials + guide. + + :type name: string + :param name: The name of the provider to create. + + """ + params = { + 'SAMLMetadataDocument': saml_metadata_document, + 'Name': name, + } + return self.get_response('CreateSAMLProvider', params) + + def list_saml_providers(self): + """ + Lists the SAML providers in the account. + This operation requires `Signature Version 4`_. + """ + return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList') + + def get_saml_provider(self, saml_provider_arn): + """ + Returns the SAML provider metadocument that was uploaded when + the provider was created or updated. + This operation requires `Signature Version 4`_. + + :type saml_provider_arn: string + :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML + provider to get information about. + + """ + params = {'SAMLProviderArn': saml_provider_arn} + return self.get_response('GetSAMLProvider', params) + + def update_saml_provider(self, saml_provider_arn, saml_metadata_document): + """ + Updates the metadata document for an existing SAML provider. + This operation requires `Signature Version 4`_. + + :type saml_provider_arn: string + :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML + provider to update. + + :type saml_metadata_document: string + :param saml_metadata_document: An XML document generated by an identity + provider (IdP) that supports SAML 2.0. The document includes the + issuer's name, expiration information, and keys that can be used to + validate the SAML authentication response (assertions) that are + received from the IdP. You must generate the metadata document + using the identity management software that is used as your + organization's IdP. + + """ + params = { + 'SAMLMetadataDocument': saml_metadata_document, + 'SAMLProviderArn': saml_provider_arn, + } + return self.get_response('UpdateSAMLProvider', params) + + def delete_saml_provider(self, saml_provider_arn): + """ + Deletes a SAML provider. + + Deleting the provider does not update any roles that reference + the SAML provider as a principal in their trust policies. Any + attempt to assume a role that references a SAML provider that + has been deleted will fail. + This operation requires `Signature Version 4`_. + + :type saml_provider_arn: string + :param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML + provider to delete. + + """ + params = {'SAMLProviderArn': saml_provider_arn} + return self.get_response('DeleteSAMLProvider', params) + + # + # IAM Reports + # + + def generate_credential_report(self): + """ + Generates a credential report for an account + + A new credential report can only be generated every 4 hours. 
If one
+        hasn't been generated in the last 4 hours, then get_credential_report
+        will raise an error when called.
+        """
+        params = {}
+        return self.get_response('GenerateCredentialReport', params)
+
+    def get_credential_report(self):
+        """
+        Retrieves a credential report for an account.
+
+        A report must have been generated in the last 4 hours to succeed.
+        The report is returned as a base64 encoded blob within the response.
+        """
+        params = {}
+        return self.get_response('GetCredentialReport', params)
+
+    def create_virtual_mfa_device(self, path, device_name):
+        """
+        Creates a new virtual MFA device for the AWS account.
+
+        After creating the virtual MFA, use enable-mfa-device to
+        attach the MFA device to an IAM user.
+
+        :type path: string
+        :param path: The path for the virtual MFA device.
+
+        :type device_name: string
+        :param device_name: The name of the virtual MFA device.
+            Used with path to uniquely identify a virtual MFA device.
+
+        """
+        params = {
+            'Path': path,
+            'VirtualMFADeviceName': device_name
+        }
+        return self.get_response('CreateVirtualMFADevice', params)
+
+    #
+    # IAM password policy
+    #
+
+    def get_account_password_policy(self):
+        """
+        Returns the password policy for the AWS account.
+        """
+        params = {}
+        return self.get_response('GetAccountPasswordPolicy', params)
+
+    def delete_account_password_policy(self):
+        """
+        Delete the password policy currently set for the AWS account.
+        """
+        params = {}
+        return self.get_response('DeleteAccountPasswordPolicy', params)
+
+    def update_account_password_policy(self, allow_users_to_change_password=None,
+                                       hard_expiry=None, max_password_age=None,
+                                       minimum_password_length=None,
+                                       password_reuse_prevention=None,
+                                       require_lowercase_characters=None,
+                                       require_numbers=None, require_symbols=None,
+                                       require_uppercase_characters=None):
+        """
+        Update the password policy for the AWS account.
+
+        Note: unset parameters will be reset to Amazon default settings!
+        Most of the password policy settings are enforced the next time your
+        users change their passwords. When you set minimum length and character
+        type requirements, they are enforced the next time your users change
+        their passwords; users are not forced to change their existing
+        passwords, even if the pre-existing passwords do not adhere to the
+        updated password policy. When you set a password expiration period,
+        the expiration period is enforced immediately.
+
+        :type allow_users_to_change_password: bool
+        :param allow_users_to_change_password: Allows all IAM users in your
+            account to use the AWS Management Console to change their own
+            passwords.
+
+        :type hard_expiry: bool
+        :param hard_expiry: Prevents IAM users from setting a new password
+            after their password has expired.
+
+        :type max_password_age: int
+        :param max_password_age: The number of days that an IAM user password
+            is valid.
+
+        :type minimum_password_length: int
+        :param minimum_password_length: The minimum number of characters
+            allowed in an IAM user password.
+
+        :type password_reuse_prevention: int
+        :param password_reuse_prevention: Specifies the number of previous
+            passwords that IAM users are prevented from reusing.
+
+        :type require_lowercase_characters: bool
+        :param require_lowercase_characters: Specifies whether IAM user
+            passwords must contain at least one lowercase character from the
+            ISO basic Latin alphabet (``a`` to ``z``).
+
+        :type require_numbers: bool
+        :param require_numbers: Specifies whether IAM user passwords must
+            contain at least one numeric character (``0`` to ``9``).
+
+        :type require_symbols: bool
+        :param require_symbols: Specifies whether IAM user passwords must
+            contain at least one of the following non-alphanumeric characters:
+            ``! @ # $ % ^ & * ( ) _ + - = [ ] { } | '``
+
+        :type require_uppercase_characters: bool
+        :param require_uppercase_characters: Specifies whether IAM user
+            passwords must contain at least one uppercase character from the
+            ISO basic Latin alphabet (``A`` to ``Z``).
+        """
+        params = {}
+        if allow_users_to_change_password is not None and type(allow_users_to_change_password) is bool:
+            params['AllowUsersToChangePassword'] = str(allow_users_to_change_password).lower()
+        if hard_expiry is not None and type(hard_expiry) is bool:
+            params['HardExpiry'] = str(hard_expiry).lower()
+        if max_password_age is not None:
+            params['MaxPasswordAge'] = max_password_age
+        if minimum_password_length is not None:
+            params['MinimumPasswordLength'] = minimum_password_length
+        if password_reuse_prevention is not None:
+            params['PasswordReusePrevention'] = password_reuse_prevention
+        if require_lowercase_characters is not None and type(require_lowercase_characters) is bool:
+            params['RequireLowercaseCharacters'] = str(require_lowercase_characters).lower()
+        if require_numbers is not None and type(require_numbers) is bool:
+            params['RequireNumbers'] = str(require_numbers).lower()
+        if require_symbols is not None and type(require_symbols) is bool:
+            params['RequireSymbols'] = str(require_symbols).lower()
+        if require_uppercase_characters is not None and type(require_uppercase_characters) is bool:
+            params['RequireUppercaseCharacters'] = str(require_uppercase_characters).lower()
+        return self.get_response('UpdateAccountPasswordPolicy', params)
+
+    def create_policy(self, policy_name, policy_document, path='/',
+                      description=None):
+        """
+        Create a policy.
+
+        :type policy_name: string
+        :param policy_name: The name of the new policy
+
+        :type policy_document: string
+        :param policy_document: The document of the new policy
+
+        :type path: string
+        :param path: The path in which the policy will be created.
+            Defaults to /.
+
+        :type description: string
+        :param description: A description of the new policy.
+
+        """
+        params = {'PolicyName': policy_name,
+                  'PolicyDocument': policy_document,
+                  'Path': path}
+        if description is not None:
+            params['Description'] = str(description)
+
+        return self.get_response('CreatePolicy', params)
+
+    def create_policy_version(
+            self,
+            policy_arn,
+            policy_document,
+            set_as_default=None):
+        """
+        Create a policy version.
+
+        :type policy_arn: string
+        :param policy_arn: The ARN of the policy
+
+        :type policy_document: string
+        :param policy_document: The document of the new policy version
+
+        :type set_as_default: bool
+        :param set_as_default: Sets the policy version as default.
+            Defaults to None.
+
+        """
+        params = {'PolicyArn': policy_arn,
+                  'PolicyDocument': policy_document}
+        if type(set_as_default) == bool:
+            params['SetAsDefault'] = str(set_as_default).lower()
+        return self.get_response('CreatePolicyVersion', params)
+
+    def delete_policy(self, policy_arn):
+        """
+        Delete a policy.
+
+        :type policy_arn: string
+        :param policy_arn: The ARN of the policy to delete
+
+        """
+        params = {'PolicyArn': policy_arn}
+        return self.get_response('DeletePolicy', params)
+
+    def delete_policy_version(self, policy_arn, version_id):
+        """
+        Delete a policy version.
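Each boolean in update_account_password_policy is serialized with str(x).lower(), producing the literal 'true'/'false' strings the IAM query API expects. A hedged usage sketch; since unset parameters revert to the AWS defaults, an update should restate every setting it cares about:

    conn.update_account_password_policy(
        allow_users_to_change_password=True,
        minimum_password_length=14,
        max_password_age=90,
        password_reuse_prevention=5,
        require_lowercase_characters=True,
        require_uppercase_characters=True,
        require_numbers=True,
        require_symbols=True,
    )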
+ + :type policy_arn: string + :param policy_arn: The ARN of the policy to delete a version from + + :type version_id: string + :param version_id: The id of the version to delete + + """ + params = {'PolicyArn': policy_arn, + 'VersionId': version_id} + return self.get_response('DeletePolicyVersion', params) + + def get_policy(self, policy_arn): + """ + Get policy information. + + :type policy_arn: string + :param policy_arn: The ARN of the policy to get information for + + """ + params = {'PolicyArn': policy_arn} + return self.get_response('GetPolicy', params) + + def get_policy_version(self, policy_arn, version_id): + """ + Get policy information. + + :type policy_arn: string + :param policy_arn: The ARN of the policy to get information for a + specific version + + :type version_id: string + :param version_id: The id of the version to get information for + + """ + params = {'PolicyArn': policy_arn, + 'VersionId': version_id} + return self.get_response('GetPolicyVersion', params) + + def list_policies(self, marker=None, max_items=None, only_attached=None, + path_prefix=None, scope=None): + """ + List policies of account. + + :type marker: string + :param marker: A marker used for pagination (received from previous + accesses) + + :type max_items: int + :param max_items: Send only max_items; allows paginations + + :type only_attached: bool + :param only_attached: Send only policies attached to other resources + + :type path_prefix: string + :param path_prefix: Send only items prefixed by this path + + :type scope: string + :param scope: AWS|Local. Choose between AWS policies or your own + """ + params = {} + if path_prefix is not None: + params['PathPrefix'] = path_prefix + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + if type(only_attached) == bool: + params['OnlyAttached'] = str(only_attached).lower() + if scope is not None: + params['Scope'] = scope + return self.get_response( + 'ListPolicies', + params, + list_marker='Policies') + + def list_policy_versions(self, policy_arn, marker=None, max_items=None): + """ + List policy versions. + + :type policy_arn: string + :param policy_arn: The ARN of the policy to get versions of + + :type marker: string + :param marker: A marker used for pagination (received from previous + accesses) + + :type max_items: int + :param max_items: Send only max_items; allows paginations + + """ + params = {'PolicyArn': policy_arn} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.get_response( + 'ListPolicyVersions', + params, + list_marker='Versions') + + def set_default_policy_version(self, policy_arn, version_id): + """ + Set default policy version. 
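Managed policies are versioned: create_policy establishes the ARN, create_policy_version adds revisions (IAM caps a policy at five stored versions), and set_default_policy_version / delete_policy_version control which revision is live. A sketch under those assumptions; the account id in the ARN is a placeholder:

    import json

    doc_v1 = json.dumps({'Statement': [{'Effect': 'Allow',
                                        'Action': ['s3:GetObject'],
                                        'Resource': ['*']}]})
    conn.create_policy('read-objects', doc_v1)

    # A later revision can be published and made live in one call.
    doc_v2 = doc_v1.replace('s3:GetObject', 's3:ListBucket')
    conn.create_policy_version(
        'arn:aws:iam::123456789012:policy/read-objects',  # hypothetical ARN
        doc_v2, set_as_default=True)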
+ + :type policy_arn: string + :param policy_arn: The ARN of the policy to set the default version + for + + :type version_id: string + :param version_id: The id of the version to set as default + """ + params = {'PolicyArn': policy_arn, + 'VersionId': version_id} + return self.get_response('SetDefaultPolicyVersion', params) + + def list_entities_for_policy(self, policy_arn, path_prefix=None, + marker=None, max_items=None, + entity_filter=None): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to get entities for + + :type marker: string + :param marker: A marker used for pagination (received from previous + accesses) + + :type max_items: int + :param max_items: Send only max_items; allows paginations + + :type path_prefix: string + :param path_prefix: Send only items prefixed by this path + + :type entity_filter: string + :param entity_filter: Which entity type of User | Role | Group | + LocalManagedPolicy | AWSManagedPolicy to return + + """ + params = {'PolicyArn': policy_arn} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + if path_prefix is not None: + params['PathPrefix'] = path_prefix + if entity_filter is not None: + params['EntityFilter'] = entity_filter + return self.get_response('ListEntitiesForPolicy', params, + list_marker=('PolicyGroups', + 'PolicyUsers', + 'PolicyRoles')) + + def attach_group_policy(self, policy_arn, group_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to attach + + :type group_name: string + :param group_name: Group to attach the policy to + + """ + params = {'PolicyArn': policy_arn, 'GroupName': group_name} + return self.get_response('AttachGroupPolicy', params) + + def attach_role_policy(self, policy_arn, role_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to attach + + :type role_name: string + :param role_name: Role to attach the policy to + + """ + params = {'PolicyArn': policy_arn, 'RoleName': role_name} + return self.get_response('AttachRolePolicy', params) + + def attach_user_policy(self, policy_arn, user_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to attach + + :type user_name: string + :param user_name: User to attach the policy to + + """ + params = {'PolicyArn': policy_arn, 'UserName': user_name} + return self.get_response('AttachUserPolicy', params) + + def detach_group_policy(self, policy_arn, group_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to detach + + :type group_name: string + :param group_name: Group to detach the policy from + + """ + params = {'PolicyArn': policy_arn, 'GroupName': group_name} + return self.get_response('DetachGroupPolicy', params) + + def detach_role_policy(self, policy_arn, role_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to detach + + :type role_name: string + :param role_name: Role to detach the policy from + + """ + params = {'PolicyArn': policy_arn, 'RoleName': role_name} + return self.get_response('DetachRolePolicy', params) + + def detach_user_policy(self, policy_arn, user_name): + """ + :type policy_arn: string + :param policy_arn: The ARN of the policy to detach + + :type user_name: string + :param user_name: User to detach the policy from + + """ + params = {'PolicyArn': policy_arn, 'UserName': user_name} + return self.get_response('DetachUserPolicy', params) diff --git a/ext/boto/iam/summarymap.py b/ext/boto/iam/summarymap.py new 
file mode 100644 index 0000000000..b7976bb7e1 --- /dev/null +++ b/ext/boto/iam/summarymap.py @@ -0,0 +1,42 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class SummaryMap(dict): + + def __init__(self, parent=None): + self.parent = parent + dict.__init__(self) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + elif name == 'value': + try: + self[self._name] = int(value) + except ValueError: + self[self._name] = value + else: + setattr(self, name, value) diff --git a/ext/boto/jsonresponse.py b/ext/boto/jsonresponse.py new file mode 100644 index 0000000000..f872b42990 --- /dev/null +++ b/ext/boto/jsonresponse.py @@ -0,0 +1,168 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
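This jsonresponse module is what IAMConnection.get_response() leans on: XmlHandler walks SAX events while Element and ListElement accumulate them into nested dict/list hybrids, with tag names ending in a list marker (e.g. 'Set') becoming lists and their 'member'/'item' children becoming list entries. A small self-contained sketch of the parser, assuming boto.utils (which supplies mklist and pythonize_name) is importable:

    from boto.jsonresponse import Element, XmlHandler

    xml = ('<GroupSet>'
           '<member><GroupName>admins</GroupName></member>'
           '<member><GroupName>auditors</GroupName></member>'
           '</GroupSet>')

    root = Element(list_marker='Set', pythonize_name=True)
    XmlHandler(root, None).parse(xml)

    # 'GroupSet' matched the 'Set' list marker, so it parsed into a
    # ListElement of Elements keyed by pythonized tag names.
    for group in root.group_set:
        print(group.group_name)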
+ +import xml.sax +from boto import utils + + +class XmlHandler(xml.sax.ContentHandler): + + def __init__(self, root_node, connection): + self.connection = connection + self.nodes = [('root', root_node)] + self.current_text = '' + + def startElement(self, name, attrs): + self.current_text = '' + t = self.nodes[-1][1].startElement(name, attrs, self.connection) + if t is not None: + if isinstance(t, tuple): + self.nodes.append(t) + else: + self.nodes.append((name, t)) + + def endElement(self, name): + self.nodes[-1][1].endElement(name, self.current_text, self.connection) + if self.nodes[-1][0] == name: + self.nodes.pop() + self.current_text = '' + + def characters(self, content): + self.current_text += content + + def parse(self, s): + if not isinstance(s, bytes): + s = s.encode('utf-8') + xml.sax.parseString(s, self) + + +class Element(dict): + + def __init__(self, connection=None, element_name=None, + stack=None, parent=None, list_marker=('Set',), + item_marker=('member', 'item'), + pythonize_name=False): + dict.__init__(self) + self.connection = connection + self.element_name = element_name + self.list_marker = utils.mklist(list_marker) + self.item_marker = utils.mklist(item_marker) + if stack is None: + self.stack = [] + else: + self.stack = stack + self.pythonize_name = pythonize_name + self.parent = parent + + def __getattr__(self, key): + if key in self: + return self[key] + for k in self: + e = self[k] + if isinstance(e, Element): + try: + return getattr(e, key) + except AttributeError: + pass + raise AttributeError + + def get_name(self, name): + if self.pythonize_name: + name = utils.pythonize_name(name) + return name + + def startElement(self, name, attrs, connection): + self.stack.append(name) + for lm in self.list_marker: + if name.endswith(lm): + l = ListElement(self.connection, name, self.list_marker, + self.item_marker, self.pythonize_name) + self[self.get_name(name)] = l + return l + if len(self.stack) > 0: + element_name = self.stack[-1] + e = Element(self.connection, element_name, self.stack, self, + self.list_marker, self.item_marker, + self.pythonize_name) + self[self.get_name(element_name)] = e + return (element_name, e) + else: + return None + + def endElement(self, name, value, connection): + if len(self.stack) > 0: + self.stack.pop() + value = value.strip() + if value: + if isinstance(self.parent, Element): + self.parent[self.get_name(name)] = value + elif isinstance(self.parent, ListElement): + self.parent.append(value) + + +class ListElement(list): + + def __init__(self, connection=None, element_name=None, + list_marker=['Set'], item_marker=('member', 'item'), + pythonize_name=False): + list.__init__(self) + self.connection = connection + self.element_name = element_name + self.list_marker = list_marker + self.item_marker = item_marker + self.pythonize_name = pythonize_name + + def get_name(self, name): + if self.pythonize_name: + name = utils.pythonize_name(name) + return name + + def startElement(self, name, attrs, connection): + for lm in self.list_marker: + if name.endswith(lm): + l = ListElement(self.connection, name, + self.list_marker, self.item_marker, + self.pythonize_name) + setattr(self, self.get_name(name), l) + return l + if name in self.item_marker: + e = Element(self.connection, name, parent=self, + list_marker=self.list_marker, + item_marker=self.item_marker, + pythonize_name=self.pythonize_name) + self.append(e) + return e + else: + return None + + def endElement(self, name, value, connection): + if name == self.element_name: + if len(self) > 0: + 
empty = [] + for e in self: + if isinstance(e, Element): + if len(e) == 0: + empty.append(e) + for e in empty: + self.remove(e) + else: + setattr(self, self.get_name(name), value) diff --git a/ext/boto/kinesis/__init__.py b/ext/boto/kinesis/__init__.py new file mode 100644 index 0000000000..9972a2a500 --- /dev/null +++ b/ext/boto/kinesis/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Kinesis service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.kinesis.layer1 import KinesisConnection + return get_regions('kinesis', connection_cls=KinesisConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.kinesis.layer1 import KinesisConnection + return connect('kinesis', region_name, + connection_cls=KinesisConnection, **kw_params) diff --git a/ext/boto/kinesis/exceptions.py b/ext/boto/kinesis/exceptions.py new file mode 100644 index 0000000000..708f46369a --- /dev/null +++ b/ext/boto/kinesis/exceptions.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
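Element, ListElement, and XmlHandler above turn AWS XML responses into nested dict/list structures: element names ending in a list_marker (default 'Set') become lists, children matching an item_marker ('member' or 'item') become dict-like Elements, and scalar text lands on the parent. A minimal sketch with a made-up payload and no live connection; the tag names are chosen to trip the default markers:

    from boto.jsonresponse import Element, XmlHandler

    root = Element()
    XmlHandler(root, None).parse(
        '<Response>'
        '<Name>demo</Name>'
        '<TagSet><item><Key>env</Key><Value>dev</Value></item></TagSet>'
        '</Response>'
    )
    print(root.Response.Name)           # 'demo' (attribute-style lookup)
    print(root.Response.TagSet[0].Key)  # 'env' (a ListElement of Elements)

This is the same machinery that get_response(..., list_marker=...) relies on for the IAM policy calls earlier in this patch.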
+# +from boto.exception import BotoServerError + + +class ProvisionedThroughputExceededException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class ExpiredIteratorException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class InvalidArgumentException(BotoServerError): + pass + + +class SubscriptionRequiredException(BotoServerError): + pass diff --git a/ext/boto/kinesis/layer1.py b/ext/boto/kinesis/layer1.py new file mode 100644 index 0000000000..a58048cccb --- /dev/null +++ b/ext/boto/kinesis/layer1.py @@ -0,0 +1,879 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import base64 +import boto + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.kinesis import exceptions +from boto.compat import json +from boto.compat import six + + +class KinesisConnection(AWSQueryConnection): + """ + Amazon Kinesis Service API Reference + Amazon Kinesis is a managed service that scales elastically for + real time processing of streaming big data. + """ + APIVersion = "2013-12-02" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com" + ServiceName = "Kinesis" + TargetPrefix = "Kinesis_20131202" + ResponseError = JSONResponseError + + _faults = { + "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException, + "LimitExceededException": exceptions.LimitExceededException, + "ExpiredIteratorException": exceptions.ExpiredIteratorException, + "ResourceInUseException": exceptions.ResourceInUseException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "InvalidArgumentException": exceptions.InvalidArgumentException, + "SubscriptionRequiredException": exceptions.SubscriptionRequiredException + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + super(KinesisConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_tags_to_stream(self, stream_name, tags): + """ + Adds or updates tags for the specified Amazon Kinesis stream. 
+ Each stream can have up to 10 tags. + + If tags have already been assigned to the stream, + `AddTagsToStream` overwrites any existing tags that correspond + to the specified tag keys. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type tags: map + :param tags: The set of key-value pairs to use to create the tags. + + """ + params = {'StreamName': stream_name, 'Tags': tags, } + return self.make_request(action='AddTagsToStream', + body=json.dumps(params)) + + def create_stream(self, stream_name, shard_count): + """ + Creates an Amazon Kinesis stream. A stream captures and + transports data records that are continuously emitted from + different data sources or producers. Scale-out within an + Amazon Kinesis stream is explicitly supported by means of + shards, which are uniquely identified groups of data records + in an Amazon Kinesis stream. + + You specify and control the number of shards that a stream is + composed of. Each open shard can support up to 5 read + transactions per second, up to a maximum total of 2 MB of data + read per second. Each shard can support up to 1000 records + written per second, up to a maximum total of 1 MB of data written + per second. You can add shards to a stream if the amount of + data input increases and you can remove shards if the amount + of data input decreases. + + The stream name identifies the stream. The name is scoped to + the AWS account used by the application. It is also scoped by + region. That is, two streams in two different accounts can + have the same name, and two streams in the same account, but + in two different regions, can have the same name. + + `CreateStream` is an asynchronous operation. Upon receiving a + `CreateStream` request, Amazon Kinesis immediately returns and + sets the stream status to `CREATING`. After the stream is + created, Amazon Kinesis sets the stream status to `ACTIVE`. + You should perform read and write operations only on an + `ACTIVE` stream. + + You receive a `LimitExceededException` when making a + `CreateStream` request if you try to do one of the following: + + + + Have more than five streams in the `CREATING` state at any + point in time. + + Create more shards than are authorized for your account. + + + The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, + `contact AWS Support`_ to increase the limit on your account. + + You can use `DescribeStream` to check the stream status, which + is returned in `StreamStatus`. + + `CreateStream` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: A name to identify the stream. The stream name is + scoped to the AWS account used by the application that creates the + stream. It is also scoped by region. That is, two streams in two + different AWS accounts can have the same name, and two streams in + the same AWS account, but in two different regions, can have the + same name. + + :type shard_count: integer + :param shard_count: The number of shards that the stream will use. The + throughput of the stream is a function of the number of shards; + more shards are required for greater provisioned throughput. + **Note:** The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, `contact + AWS Support`_ to increase the limit on your account.
+ + """ + params = { + 'StreamName': stream_name, + 'ShardCount': shard_count, + } + return self.make_request(action='CreateStream', + body=json.dumps(params)) + + def delete_stream(self, stream_name): + """ + Deletes a stream and all its shards and data. You must shut + down any applications that are operating on the stream before + you delete the stream. If an application attempts to operate + on a deleted stream, it will receive the exception + `ResourceNotFoundException`. + + If the stream is in the `ACTIVE` state, you can delete it. + After a `DeleteStream` request, the specified stream is in the + `DELETING` state until Amazon Kinesis completes the deletion. + + **Note:** Amazon Kinesis might continue to accept data read + and write operations, such as PutRecord, PutRecords, and + GetRecords, on a stream in the `DELETING` state until the + stream deletion is complete. + + When you delete a stream, any shards in that stream are also + deleted, and any tags are dissociated from the stream. + + You can use the DescribeStream operation to check the state of + the stream, which is returned in `StreamStatus`. + + `DeleteStream` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream to delete. + + """ + params = {'StreamName': stream_name, } + return self.make_request(action='DeleteStream', + body=json.dumps(params)) + + def describe_stream(self, stream_name, limit=None, + exclusive_start_shard_id=None): + """ + Describes the specified stream. + + The information about the stream includes its current status, + its Amazon Resource Name (ARN), and an array of shard objects. + For each shard object, there is information about the hash key + and sequence number ranges that the shard spans, and the IDs + of any earlier shards that played in a role in creating the + shard. A sequence number is the identifier associated with + every record ingested in the Amazon Kinesis stream. The + sequence number is assigned when a record is put into the + stream. + + You can limit the number of returned shards using the `Limit` + parameter. The number of shards in a stream may be too large + to return from a single call to `DescribeStream`. You can + detect this by using the `HasMoreShards` flag in the returned + output. `HasMoreShards` is set to `True` when there is more + data available. + + `DescribeStream` is a paginated operation. If there are more + shards available, you can request them using the shard ID of + the last shard returned. Specify this ID in the + `ExclusiveStartShardId` parameter in a subsequent request to + `DescribeStream`. + + `DescribeStream` has a limit of 10 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream to describe. + + :type limit: integer + :param limit: The maximum number of shards to return. + + :type exclusive_start_shard_id: string + :param exclusive_start_shard_id: The shard ID of the shard to start + with. + + """ + params = {'StreamName': stream_name, } + if limit is not None: + params['Limit'] = limit + if exclusive_start_shard_id is not None: + params['ExclusiveStartShardId'] = exclusive_start_shard_id + return self.make_request(action='DescribeStream', + body=json.dumps(params)) + + def get_records(self, shard_iterator, limit=None, b64_decode=True): + """ + Gets data records from a shard. + + Specify a shard iterator using the `ShardIterator` parameter. 
+ The shard iterator specifies the position in the shard from + which you want to start reading data records sequentially. If + there are no records available in the portion of the shard + that the iterator points to, `GetRecords` returns an empty + list. Note that it might take multiple calls to get to a + portion of the shard that contains records. + + You can scale by provisioning multiple shards. Your + application should have one thread per shard, each reading + continuously from its stream. To read from a stream + continually, call `GetRecords` in a loop. Use GetShardIterator + to get the shard iterator to specify in the first `GetRecords` + call. `GetRecords` returns a new shard iterator in + `NextShardIterator`. Specify the shard iterator returned in + `NextShardIterator` in subsequent calls to `GetRecords`. Note + that if the shard has been closed, the shard iterator can't + return more data and `GetRecords` returns `null` in + `NextShardIterator`. You can terminate the loop when the shard + is closed, or when the shard iterator reaches the record with + the sequence number or other attribute that marks it as the + last record to process. + + Each data record can be up to 50 KB in size, and each shard + can read up to 2 MB per second. You can ensure that your calls + don't exceed the maximum supported size or throughput by using + the `Limit` parameter to specify the maximum number of records + that `GetRecords` can return. Consider your average record + size when determining this limit. For example, if your average + record size is 40 KB, you can limit the data returned to about + 1 MB per call by specifying 25 as the limit. + + The size of the data returned by `GetRecords` will vary + depending on the utilization of the shard. The maximum size of + data that `GetRecords` can return is 10 MB. If a call returns + 10 MB of data, subsequent calls made within the next 5 seconds + throw `ProvisionedThroughputExceededException`. If there is + insufficient provisioned throughput on the shard, subsequent + calls made within the next 1 second throw + `ProvisionedThroughputExceededException`. Note that + `GetRecords` won't return any data when it throws an + exception. For this reason, we recommend that you wait one + second between calls to `GetRecords`; however, it's possible + that the application will get exceptions for longer than 1 + second. + + To detect whether the application is falling behind in + processing, add a timestamp to your records and note how long + it takes to process them. You can also monitor how much data + is in a stream using the CloudWatch metrics for write + operations ( `PutRecord` and `PutRecords`). For more + information, see `Monitoring Amazon Kinesis with Amazon + CloudWatch`_ in the Amazon Kinesis Developer Guide . + + :type shard_iterator: string + :param shard_iterator: The position in the shard from which you want to + start sequentially reading data records. A shard iterator specifies + this position using the sequence number of a data record in the + shard. + + :type limit: integer + :param limit: The maximum number of records to return. Specify a value + of up to 10,000. If you specify a value that is greater than + 10,000, `GetRecords` throws `InvalidArgumentException`. + + :type b64_decode: boolean + :param b64_decode: Decode the Base64-encoded ``Data`` field of records. 
+ + """ + params = {'ShardIterator': shard_iterator, } + if limit is not None: + params['Limit'] = limit + + response = self.make_request(action='GetRecords', + body=json.dumps(params)) + + # Base64 decode the data + if b64_decode: + for record in response.get('Records', []): + record['Data'] = base64.b64decode( + record['Data'].encode('utf-8')).decode('utf-8') + + return response + + def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, + starting_sequence_number=None): + """ + Gets a shard iterator. A shard iterator expires five minutes + after it is returned to the requester. + + A shard iterator specifies the position in the shard from + which to start reading data records sequentially. A shard + iterator specifies this position using the sequence number of + a data record in a shard. A sequence number is the identifier + associated with every record ingested in the Amazon Kinesis + stream. The sequence number is assigned when a record is put + into the stream. + + You must specify the shard iterator type. For example, you can + set the `ShardIteratorType` parameter to read exactly from the + position denoted by a specific sequence number by using the + `AT_SEQUENCE_NUMBER` shard iterator type, or right after the + sequence number by using the `AFTER_SEQUENCE_NUMBER` shard + iterator type, using sequence numbers returned by earlier + calls to PutRecord, PutRecords, GetRecords, or DescribeStream. + You can specify the shard iterator type `TRIM_HORIZON` in the + request to cause `ShardIterator` to point to the last + untrimmed record in the shard in the system, which is the + oldest data record in the shard. Or you can point to just + after the most recent record in the shard, by using the shard + iterator type `LATEST`, so that you always read the most + recent data in the shard. + + When you repeatedly read from an Amazon Kinesis stream use a + GetShardIterator request to get the first shard iterator to to + use in your first `GetRecords` request and then use the shard + iterator returned by the `GetRecords` request in + `NextShardIterator` for subsequent reads. A new shard iterator + is returned by every `GetRecords` request in + `NextShardIterator`, which you use in the `ShardIterator` + parameter of the next `GetRecords` request. + + If a `GetShardIterator` request is made too often, you receive + a `ProvisionedThroughputExceededException`. For more + information about throughput limits, see GetRecords. + + If the shard is closed, the iterator can't return more data, + and `GetShardIterator` returns `null` for its `ShardIterator`. + A shard can be closed using SplitShard or MergeShards. + + `GetShardIterator` has a limit of 5 transactions per second + per account per open shard. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type shard_id: string + :param shard_id: The shard ID of the shard to get the iterator for. + + :type shard_iterator_type: string + :param shard_iterator_type: + Determines how the shard iterator is used to start reading data records + from the shard. + + The following are the valid shard iterator types: + + + + AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted + by a specific sequence number. + + AFTER_SEQUENCE_NUMBER - Start reading right after the position + denoted by a specific sequence number. + + TRIM_HORIZON - Start reading at the last untrimmed record in the + shard in the system, which is the oldest data record in the shard. 
+ + LATEST - Start reading just after the most recent record in the + shard, so that you always read the most recent data in the shard. + + :type starting_sequence_number: string + :param starting_sequence_number: The sequence number of the data record + in the shard from which to start reading. + + :returns: A dictionary containing: + + 1) a `ShardIterator` with the value being the shard-iterator object + """ + + params = { + 'StreamName': stream_name, + 'ShardId': shard_id, + 'ShardIteratorType': shard_iterator_type, + } + if starting_sequence_number is not None: + params['StartingSequenceNumber'] = starting_sequence_number + return self.make_request(action='GetShardIterator', + body=json.dumps(params)) + + def list_streams(self, limit=None, exclusive_start_stream_name=None): + """ + Lists your streams. + + The number of streams may be too large to return from a single + call to `ListStreams`. You can limit the number of returned + streams using the `Limit` parameter. If you do not specify a + value for the `Limit` parameter, Amazon Kinesis uses the + default limit, which is currently 10. + + You can detect if there are more streams available to list by + using the `HasMoreStreams` flag from the returned output. If + there are more streams available, you can request more streams + by using the name of the last stream returned by the + `ListStreams` request in the `ExclusiveStartStreamName` + parameter in a subsequent request to `ListStreams`. The group + of stream names returned by the subsequent request is then + added to the list. You can continue this process until all the + stream names have been collected in the list. + + `ListStreams` has a limit of 5 transactions per second per + account. + + :type limit: integer + :param limit: The maximum number of streams to list. + + :type exclusive_start_stream_name: string + :param exclusive_start_stream_name: The name of the stream to start the + list with. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if exclusive_start_stream_name is not None: + params['ExclusiveStartStreamName'] = exclusive_start_stream_name + return self.make_request(action='ListStreams', + body=json.dumps(params)) + + def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, + limit=None): + """ + Lists the tags for the specified Amazon Kinesis stream. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type exclusive_start_tag_key: string + :param exclusive_start_tag_key: The key to use as the starting point + for the list of tags. If this parameter is set, `ListTagsForStream` + gets all tags that occur after `ExclusiveStartTagKey`. + + :type limit: integer + :param limit: The number of tags to return. If this number is less than + the total number of tags associated with the stream, `HasMoreTags` + is set to `True`. To list additional tags, set + `ExclusiveStartTagKey` to the last key in the response. + + """ + params = {'StreamName': stream_name, } + if exclusive_start_tag_key is not None: + params['ExclusiveStartTagKey'] = exclusive_start_tag_key + if limit is not None: + params['Limit'] = limit + return self.make_request(action='ListTagsForStream', + body=json.dumps(params)) + + def merge_shards(self, stream_name, shard_to_merge, + adjacent_shard_to_merge): + """ + Merges two adjacent shards in a stream and combines them into + a single shard to reduce the stream's capacity to ingest and + transport data.
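Putting the read-side calls above together (DescribeStream to find a shard, GetShardIterator to position, GetRecords in a loop), a minimal consumer sketch; the stream name here is made up, and credentials are assumed to come from the usual boto configuration:

    import time
    from boto import kinesis

    conn = kinesis.connect_to_region('us-east-1')
    stream = conn.describe_stream('my-stream')['StreamDescription']
    shard_id = stream['Shards'][0]['ShardId']
    shard_iterator = conn.get_shard_iterator(
        'my-stream', shard_id, 'TRIM_HORIZON')['ShardIterator']

    while shard_iterator:
        result = conn.get_records(shard_iterator, limit=100)
        for record in result['Records']:  # Data is Base64-decoded by default
            print(record['Data'])
        shard_iterator = result['NextShardIterator']
        time.sleep(1)  # the GetRecords docstring recommends pacing calls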
Two shards are considered adjacent if the + union of the hash key ranges for the two shards form a + contiguous set with no gaps. For example, if you have two + shards, one with a hash key range of 276...381 and the other + with a hash key range of 382...454, then you could merge these + two shards into a single shard that would have a hash key + range of 276...454. After the merge, the single child shard + receives data for all hash key values covered by the two + parent shards. + + `MergeShards` is called when there is a need to reduce the + overall capacity of a stream because of excess capacity that + is not being used. You must specify the shard to be merged and + the adjacent shard for a stream. For more information about + merging shards, see `Merge Two Shards`_ in the Amazon Kinesis + Developer Guide. + + If the stream is in the `ACTIVE` state, you can call + `MergeShards`. If a stream is in the `CREATING`, `UPDATING`, + or `DELETING` state, `MergeShards` returns a + `ResourceInUseException`. If the specified stream does not + exist, `MergeShards` returns a `ResourceNotFoundException`. + + You can use DescribeStream to check the state of the stream, + which is returned in `StreamStatus`. + + `MergeShards` is an asynchronous operation. Upon receiving a + `MergeShards` request, Amazon Kinesis immediately returns a + response and sets the `StreamStatus` to `UPDATING`. After the + operation is completed, Amazon Kinesis sets the `StreamStatus` + to `ACTIVE`. Read and write operations continue to work while + the stream is in the `UPDATING` state. + + You use DescribeStream to determine the shard IDs that are + specified in the `MergeShards` request. + + If you try to operate on too many streams in parallel using + CreateStream, DeleteStream, `MergeShards` or SplitShard, you + will receive a `LimitExceededException`. + + `MergeShards` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream for the merge. + + :type shard_to_merge: string + :param shard_to_merge: The shard ID of the shard to combine with the + adjacent shard for the merge. + + :type adjacent_shard_to_merge: string + :param adjacent_shard_to_merge: The shard ID of the adjacent shard for + the merge. + + """ + params = { + 'StreamName': stream_name, + 'ShardToMerge': shard_to_merge, + 'AdjacentShardToMerge': adjacent_shard_to_merge, + } + return self.make_request(action='MergeShards', + body=json.dumps(params)) + + def put_record(self, stream_name, data, partition_key, + explicit_hash_key=None, + sequence_number_for_ordering=None, + exclusive_minimum_sequence_number=None, + b64_encode=True): + """ + This operation puts a data record into an Amazon Kinesis + stream from a producer. This operation must be called to send + data from the producer into the Amazon Kinesis stream for + real-time ingestion and subsequent processing. The `PutRecord` + operation requires the name of the stream that captures, + stores, and transports the data; a partition key; and the data + blob itself. The data blob could be a segment from a log file, + geographic/location data, website clickstream data, or any + other data type. + + The partition key is used to distribute data across shards. + Amazon Kinesis segregates the data records that belong to a + data stream into multiple shards, using the partition key + associated with each data record to determine which shard a + given data record belongs to.
+ + Partition keys are Unicode strings, with a maximum length + limit of 256 bytes. An MD5 hash function is used to map + partition keys to 128-bit integer values and to map associated + data records to shards using the hash key ranges of the + shards. You can override hashing the partition key to + determine the shard by explicitly specifying a hash value + using the `ExplicitHashKey` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. + + `PutRecord` returns the shard ID of where the data record was + placed and the sequence number that was assigned to the data + record. + + Sequence numbers generally increase over time. To guarantee + strictly increasing ordering, use the + `SequenceNumberForOrdering` parameter. For more information, + see the `Amazon Kinesis Developer Guide`_. + + If a `PutRecord` request cannot be processed because of + insufficient provisioned throughput on the shard involved in + the request, `PutRecord` throws + `ProvisionedThroughputExceededException`. + + Data records are accessible for only 24 hours from the time + that they are added to an Amazon Kinesis stream. + + :type stream_name: string + :param stream_name: The name of the stream to put the data record into. + + :type data: blob + :param data: The data blob to put into the record, which is + Base64-encoded when the blob is serialized. + The maximum size of the data blob (the payload after + Base64-decoding) is 50 kilobytes (KB) + Set `b64_encode` to disable automatic Base64 encoding. + + :type partition_key: string + :param partition_key: Determines which shard in the stream the data + record is assigned to. Partition keys are Unicode strings with a + maximum length limit of 256 bytes. Amazon Kinesis uses the + partition key as input to a hash function that maps the partition + key and associated data to a specific shard. Specifically, an MD5 + hash function is used to map partition keys to 128-bit integer + values and to map associated data records to shards. As a result of + this hashing mechanism, all data records with the same partition + key will map to the same shard within the stream. + + :type explicit_hash_key: string + :param explicit_hash_key: The hash value used to explicitly determine + the shard the data record is assigned to by overriding the + partition key hash. + + :type sequence_number_for_ordering: string + :param sequence_number_for_ordering: Guarantees strictly increasing + sequence numbers, for puts from the same client and to the same + partition key. Usage: set the `SequenceNumberForOrdering` of record + n to the sequence number of record n-1 (as returned in the + PutRecordResult when putting record n-1 ). If this parameter is not + set, records will be coarsely ordered based on arrival time. + + :type b64_encode: boolean + :param b64_encode: Whether to Base64 encode `data`. Can be set to + ``False`` if `data` is already encoded to prevent double encoding. 
+ + """ + params = { + 'StreamName': stream_name, + 'Data': data, + 'PartitionKey': partition_key, + } + if explicit_hash_key is not None: + params['ExplicitHashKey'] = explicit_hash_key + if sequence_number_for_ordering is not None: + params['SequenceNumberForOrdering'] = sequence_number_for_ordering + if b64_encode: + if not isinstance(params['Data'], six.binary_type): + params['Data'] = params['Data'].encode('utf-8') + params['Data'] = base64.b64encode(params['Data']).decode('utf-8') + return self.make_request(action='PutRecord', + body=json.dumps(params)) + + def put_records(self, records, stream_name, b64_encode=True): + """ + Puts (writes) multiple data records from a producer into an + Amazon Kinesis stream in a single call (also referred to as a + `PutRecords` request). Use this operation to send data from a + data producer into the Amazon Kinesis stream for real-time + ingestion and processing. Each shard can support up to 1000 + records written per second, up to a maximum total of 1 MB data + written per second. + + You must specify the name of the stream that captures, stores, + and transports the data; and an array of request `Records`, + with each record in the array requiring a partition key and + data blob. + + The data blob can be any type of data; for example, a segment + from a log file, geographic/location data, website clickstream + data, and so on. + + The partition key is used by Amazon Kinesis as input to a hash + function that maps the partition key and associated data to a + specific shard. An MD5 hash function is used to map partition + keys to 128-bit integer values and to map associated data + records to shards. As a result of this hashing mechanism, all + data records with the same partition key map to the same shard + within the stream. For more information, see `Partition Key`_ + in the Amazon Kinesis Developer Guide . + + Each record in the `Records` array may include an optional + parameter, `ExplicitHashKey`, which overrides the partition + key to shard mapping. This parameter allows a data producer to + determine explicitly the shard where the record is stored. For + more information, see `Adding Multiple Records with + PutRecords`_ in the Amazon Kinesis Developer Guide . + + The `PutRecords` response includes an array of response + `Records`. Each record in the response array directly + correlates with a record in the request array using natural + ordering, from the top to the bottom of the request and + response. The response `Records` array always includes the + same number of records as the request array. + + The response `Records` array includes both successfully and + unsuccessfully processed records. Amazon Kinesis attempts to + process all records in each `PutRecords` request. A single + record failure does not stop the processing of subsequent + records. + + A successfully-processed record includes `ShardId` and + `SequenceNumber` values. The `ShardId` parameter identifies + the shard in the stream where the record is stored. The + `SequenceNumber` parameter is an identifier assigned to the + put record, unique to all records in the stream. + + An unsuccessfully-processed record includes `ErrorCode` and + `ErrorMessage` values. `ErrorCode` reflects the type of error + and can be one of the following values: + `ProvisionedThroughputExceededException` or `InternalFailure`. 
+ `ErrorMessage` provides more detailed information about the + `ProvisionedThroughputExceededException` exception including + the account ID, stream name, and shard ID of the record that + was throttled. + + Data records are accessible for only 24 hours from the time + that they are added to an Amazon Kinesis stream. + + :type records: list + :param records: The records associated with the request. + + :type stream_name: string + :param stream_name: The stream name associated with the request. + + :type b64_encode: boolean + :param b64_encode: Whether to Base64 encode `data`. Can be set to + ``False`` if `data` is already encoded to prevent double encoding. + + """ + params = {'Records': records, 'StreamName': stream_name, } + if b64_encode: + for i in range(len(params['Records'])): + data = params['Records'][i]['Data'] + if not isinstance(data, six.binary_type): + data = data.encode('utf-8') + params['Records'][i]['Data'] = base64.b64encode( + data).decode('utf-8') + return self.make_request(action='PutRecords', + body=json.dumps(params)) + + def remove_tags_from_stream(self, stream_name, tag_keys): + """ + Deletes tags from the specified Amazon Kinesis stream. + + If you specify a tag that does not exist, it is ignored. + + :type stream_name: string + :param stream_name: The name of the stream. + + :type tag_keys: list + :param tag_keys: A list of tag keys. Each corresponding tag is removed + from the stream. + + """ + params = {'StreamName': stream_name, 'TagKeys': tag_keys, } + return self.make_request(action='RemoveTagsFromStream', + body=json.dumps(params)) + + def split_shard(self, stream_name, shard_to_split, new_starting_hash_key): + """ + Splits a shard into two new shards in the stream, to increase + the stream's capacity to ingest and transport data. + `SplitShard` is called when there is a need to increase the + overall capacity of a stream because of an expected increase in + the volume of data records being ingested. + + You can also use `SplitShard` when a shard appears to be + approaching its maximum utilization, for example, when the set + of producers sending data into the specific shard are suddenly + sending more than previously anticipated. You can also call + `SplitShard` to increase stream capacity, so that more Amazon + Kinesis applications can simultaneously read data from the + stream for real-time processing. + + You must specify the shard to be split and the new hash key, + which is the position in the shard where the shard gets split + in two. In many cases, the new hash key might simply be the + average of the beginning and ending hash key, but it can be + any hash key value in the range being mapped into the shard. + For more information about splitting shards, see `Split a + Shard`_ in the Amazon Kinesis Developer Guide. + + You can use DescribeStream to determine the shard ID and hash + key values for the `ShardToSplit` and `NewStartingHashKey` + parameters that are specified in the `SplitShard` request. + + `SplitShard` is an asynchronous operation. Upon receiving a + `SplitShard` request, Amazon Kinesis immediately returns a + response and sets the stream status to `UPDATING`. After the + operation is completed, Amazon Kinesis sets the stream status + to `ACTIVE`. Read and write operations continue to work while + the stream is in the `UPDATING` state. + + You can use `DescribeStream` to check the status of the + stream, which is returned in `StreamStatus`. If the stream is + in the `ACTIVE` state, you can call `SplitShard`.
If a stream + is in `CREATING` or `UPDATING` or `DELETING` states, + `DescribeStream` returns a `ResourceInUseException`. + + If the specified stream does not exist, `DescribeStream` + returns a `ResourceNotFoundException`. If you try to create + more shards than are authorized for your account, you receive + a `LimitExceededException`. + + The default limit for an AWS account is 10 shards per stream. + If you need to create a stream with more than 10 shards, + `contact AWS Support`_ to increase the limit on your account. + + If you try to operate on too many streams in parallel using + CreateStream, DeleteStream, MergeShards or SplitShard, you + receive a `LimitExceededException`. + + `SplitShard` has a limit of 5 transactions per second per + account. + + :type stream_name: string + :param stream_name: The name of the stream for the shard split. + + :type shard_to_split: string + :param shard_to_split: The shard ID of the shard to split. + + :type new_starting_hash_key: string + :param new_starting_hash_key: A hash key value for the starting hash + key of one of the child shards created by the split. The hash key + range for a given shard constitutes a set of ordered contiguous + positive integers. The value for `NewStartingHashKey` must be in + the range of hash keys being mapped into the shard. The + `NewStartingHashKey` hash key value and all higher hash key values + in hash key range are distributed to one of the child shards. All + the lower hash key values in the range are distributed to the other + child shard. + + """ + params = { + 'StreamName': stream_name, + 'ShardToSplit': shard_to_split, + 'NewStartingHashKey': new_starting_hash_key, + } + return self.make_request(action='SplitShard', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response.getheaders()) + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/kms/__init__.py b/ext/boto/kms/__init__.py new file mode 100644 index 0000000000..9cfb0fb4fc --- /dev/null +++ b/ext/boto/kms/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Key Management Service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.kms.layer1 import KMSConnection + return get_regions('kms', connection_cls=KMSConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.kms.layer1 import KMSConnection + return connect('kms', region_name, connection_cls=KMSConnection, + **kw_params) diff --git a/ext/boto/kms/exceptions.py b/ext/boto/kms/exceptions.py new file mode 100644 index 0000000000..8b422560a4 --- /dev/null +++ b/ext/boto/kms/exceptions.py @@ -0,0 +1,72 @@ +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class InvalidGrantTokenException(BotoServerError): + pass + + +class DisabledException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class DependencyTimeoutException(BotoServerError): + pass + + +class InvalidMarkerException(BotoServerError): + pass + + +class AlreadyExistsException(BotoServerError): + pass + + +class InvalidCiphertextException(BotoServerError): + pass + + +class KeyUnavailableException(BotoServerError): + pass + + +class InvalidAliasNameException(BotoServerError): + pass + + +class UnsupportedOperationException(BotoServerError): + pass + + +class InvalidArnException(BotoServerError): + pass + + +class KMSInternalException(BotoServerError): + pass + + +class InvalidKeyUsageException(BotoServerError): + pass + + +class MalformedPolicyDocumentException(BotoServerError): + pass + + +class NotFoundException(BotoServerError): + pass diff --git a/ext/boto/kms/layer1.py b/ext/boto/kms/layer1.py new file mode 100644 index 0000000000..88ea2e0fb8 --- /dev/null +++ b/ext/boto/kms/layer1.py @@ -0,0 +1,821 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.kms import exceptions +from boto.compat import six +import base64 + + +class KMSConnection(AWSQueryConnection): + """ + AWS Key Management Service + AWS Key Management Service (KMS) is an encryption and key + management web service. This guide describes the KMS actions that + you can call programmatically. For general information about KMS, + see (need an address here). For the KMS developer guide, see (need + address here). + + AWS provides SDKs that consist of libraries and sample code for + various programming languages and platforms (Java, Ruby, .Net, + iOS, Android, etc.). The SDKs provide a convenient way to create + programmatic access to KMS and AWS. For example, the SDKs take + care of tasks such as signing requests (see below), managing + errors, and retrying requests automatically. For more information + about the AWS SDKs, including how to download and install them, + see `Tools for Amazon Web Services`_. + + We recommend that you use the AWS SDKs to make programmatic API + calls to KMS. However, you can also use the KMS Query API to make + direct calls to the KMS web service. + + **Signing Requests** + + Requests must be signed by using an access key ID and a secret + access key. We strongly recommend that you do not use your AWS + account access key ID and secret key for everyday work with KMS. + Instead, use the access key ID and secret access key for an IAM + user, or you can use the AWS Security Token Service to generate + temporary security credentials that you can use to sign requests. + + All KMS operations require `Signature Version 4`_. + + **Recording API Requests** + + KMS supports AWS CloudTrail, a service that records AWS API calls + and related events for your AWS account and delivers them to an + Amazon S3 bucket that you specify. By using the information + collected by CloudTrail, you can determine what requests were made + to KMS, who made the request, when it was made, and so on. To + learn more about CloudTrail, including how to turn it on and find + your log files, see the `AWS CloudTrail User Guide`_. + + **Additional Resources** + + For more information about credentials and request signing, see + the following: + + + + `AWS Security Credentials`_.
This topic provides general + information about the types of credentials used for accessing AWS. + + `AWS Security Token Service`_. This guide describes how to + create and use temporary security credentials. + + `Signing AWS API Requests`_. This set of topics walks you + through the process of signing a request using an access key ID + and a secret access key. + """ + APIVersion = "2014-11-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "kms.us-east-1.amazonaws.com" + ServiceName = "KMS" + TargetPrefix = "TrentService" + ResponseError = JSONResponseError + + _faults = { + "InvalidGrantTokenException": exceptions.InvalidGrantTokenException, + "DisabledException": exceptions.DisabledException, + "LimitExceededException": exceptions.LimitExceededException, + "DependencyTimeoutException": exceptions.DependencyTimeoutException, + "InvalidMarkerException": exceptions.InvalidMarkerException, + "AlreadyExistsException": exceptions.AlreadyExistsException, + "InvalidCiphertextException": exceptions.InvalidCiphertextException, + "KeyUnavailableException": exceptions.KeyUnavailableException, + "InvalidAliasNameException": exceptions.InvalidAliasNameException, + "UnsupportedOperationException": exceptions.UnsupportedOperationException, + "InvalidArnException": exceptions.InvalidArnException, + "KMSInternalException": exceptions.KMSInternalException, + "InvalidKeyUsageException": exceptions.InvalidKeyUsageException, + "MalformedPolicyDocumentException": exceptions.MalformedPolicyDocumentException, + "NotFoundException": exceptions.NotFoundException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(KMSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_alias(self, alias_name, target_key_id): + """ + Creates a display name for a customer master key. An alias can + be used to identify a key and should be unique. The console + enforces a one-to-one mapping between the alias and a key. An + alias name can contain only alphanumeric characters, forward + slashes (/), underscores (_), and dashes (-). An alias must + start with the word "alias" followed by a forward slash + (alias/). An alias that begins with "aws" after the forward + slash (alias/aws...) is reserved by Amazon Web Services (AWS). + + :type alias_name: string + :param alias_name: String that contains the display name. Aliases that + begin with AWS are reserved. + + :type target_key_id: string + :param target_key_id: An identifier of the key for which you are + creating the alias. This value cannot be another alias. + + """ + params = { + 'AliasName': alias_name, + 'TargetKeyId': target_key_id, + } + return self.make_request(action='CreateAlias', + body=json.dumps(params)) + + def create_grant(self, key_id, grantee_principal, + retiring_principal=None, operations=None, + constraints=None, grant_tokens=None): + """ + Adds a grant to a key to specify who can access the key and + under what conditions. Grants are alternate permission + mechanisms to key policies. If absent, access to the key is + evaluated based on IAM policies attached to the user. By + default, grants do not expire. Grants can be listed, retired, + or revoked as indicated by the following APIs. Typically, when + you are finished using a grant, you retire it. 
When you want + to end a grant immediately, revoke it. For more information + about grants, see `Grants`_. + + #. ListGrants + #. RetireGrant + #. RevokeGrant + + :type key_id: string + :param key_id: A unique key identifier for a customer master key. This + value can be a globally unique identifier, an ARN, or an alias. + + :type grantee_principal: string + :param grantee_principal: Principal given permission by the grant to + use the key identified by the `keyId` parameter. + + :type retiring_principal: string + :param retiring_principal: Principal given permission to retire the + grant. For more information, see RetireGrant. + + :type operations: list + :param operations: List of operations permitted by the grant. This can + be any combination of one or more of the following values: + + #. Decrypt + #. Encrypt + #. GenerateDataKey + #. GenerateDataKeyWithoutPlaintext + #. ReEncryptFrom + #. ReEncryptTo + #. CreateGrant + + :type constraints: dict + :param constraints: Specifies the conditions under which the actions + specified by the `Operations` parameter are allowed. + + :type grant_tokens: list + :param grant_tokens: List of grant tokens. + + """ + params = { + 'KeyId': key_id, + 'GranteePrincipal': grantee_principal, + } + if retiring_principal is not None: + params['RetiringPrincipal'] = retiring_principal + if operations is not None: + params['Operations'] = operations + if constraints is not None: + params['Constraints'] = constraints + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + return self.make_request(action='CreateGrant', + body=json.dumps(params)) + + def create_key(self, policy=None, description=None, key_usage=None): + """ + Creates a customer master key. Customer master keys can be + used to encrypt small amounts of data (less than 4K) directly, + but they are most commonly used to encrypt or envelope data + keys that are then used to encrypt customer data. For more + information about data keys, see GenerateDataKey and + GenerateDataKeyWithoutPlaintext. + + :type policy: string + :param policy: Policy to be attached to the key. This is required and + delegates back to the account. The key is the root of trust. + + :type description: string + :param description: Description of the key. We recommend that you + choose a description that helps your customer decide whether the + key is appropriate for a task. + + :type key_usage: string + :param key_usage: Specifies the intended use of the key. Currently this + defaults to ENCRYPT/DECRYPT, and only symmetric encryption and + decryption are supported. + + """ + params = {} + if policy is not None: + params['Policy'] = policy + if description is not None: + params['Description'] = description + if key_usage is not None: + params['KeyUsage'] = key_usage + return self.make_request(action='CreateKey', + body=json.dumps(params)) + + def decrypt(self, ciphertext_blob, encryption_context=None, + grant_tokens=None): + """ + Decrypts ciphertext. Ciphertext is plaintext that has been + previously encrypted by using the Encrypt function. + + :type ciphertext_blob: blob + :param ciphertext_blob: Ciphertext including metadata. + + :type encryption_context: map + :param encryption_context: The encryption context. If this was + specified in the Encrypt function, it must be specified here or the + decryption operation will fail. For more information, see + `Encryption Context`_. 
+ + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform decryption. + + """ + if not isinstance(ciphertext_blob, six.binary_type): + raise TypeError( + "Value of argument ``ciphertext_blob`` " + "must be of type %s." % six.binary_type) + ciphertext_blob = base64.b64encode(ciphertext_blob) + params = {'CiphertextBlob': ciphertext_blob.decode('utf-8'), } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Decrypt', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def delete_alias(self, alias_name): + """ + Deletes the specified alias. + + :type alias_name: string + :param alias_name: The alias to be deleted. + + """ + params = {'AliasName': alias_name, } + return self.make_request(action='DeleteAlias', + body=json.dumps(params)) + + def describe_key(self, key_id): + """ + Provides detailed information about the specified customer + master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + described. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DescribeKey', + body=json.dumps(params)) + + def disable_key(self, key_id): + """ + Marks a key as disabled, thereby preventing its use. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + disabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKey', + body=json.dumps(params)) + + def disable_key_rotation(self, key_id): + """ + Disables rotation of the specified key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be disabled. This can be an ARN, an alias, or a + globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='DisableKeyRotation', + body=json.dumps(params)) + + def enable_key(self, key_id): + """ + Marks a key as enabled, thereby permitting its use. You can + have up to 25 enabled keys at one time. + + :type key_id: string + :param key_id: Unique identifier of the customer master key to be + enabled. This can be an ARN, an alias, or a globally unique + identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKey', + body=json.dumps(params)) + + def enable_key_rotation(self, key_id): + """ + Enables rotation of the specified customer master key. + + :type key_id: string + :param key_id: Unique identifier of the customer master key for which + rotation is to be enabled. This can be an ARN, an alias, or a + globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='EnableKeyRotation', + body=json.dumps(params)) + + def encrypt(self, key_id, plaintext, encryption_context=None, + grant_tokens=None): + """ + Encrypts plaintext into ciphertext by using a customer master + key. + + :type key_id: string + :param key_id: Unique identifier of the customer master. This can be an + ARN, an alias, or the Key ID. + + :type plaintext: blob + :param plaintext: Data to be encrypted. 
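+            This must be a byte string; the client base64-encodes it before
+            transmission. For example (a sketch, assuming `alias/example`
+            names an existing key)::
+
+                result = conn.encrypt('alias/example', b'my secret data')
+                ciphertext = result['CiphertextBlob']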
+ + :type encryption_context: map + :param encryption_context: Name:value pair that specifies the + encryption context to be used for authenticated encryption. For + more information, see `Authenticated Encryption`_. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to perform encryption. + + """ + if not isinstance(plaintext, six.binary_type): + raise TypeError( + "Value of argument ``plaintext`` " + "must be of type %s." % six.binary_type) + plaintext = base64.b64encode(plaintext) + params = {'KeyId': key_id, 'Plaintext': plaintext.decode('utf-8'), } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='Encrypt', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_data_key(self, key_id, encryption_context=None, + number_of_bytes=None, key_spec=None, + grant_tokens=None): + """ + Generates a secure data key. Data keys are used to encrypt and + decrypt data. They are wrapped by customer master keys. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type encryption_context: map + :param encryption_context: Name/value pair that contains additional + data to be authenticated during the encryption and decryption + processes that use the key. This value is logged by AWS CloudTrail + to provide context around the data encrypted by the key. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. 1024 is + the current limit. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size to generate a data key for. Currently this can be AES_128 or + AES_256. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if key_spec is not None: + params['KeySpec'] = key_spec + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKey', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def generate_data_key_without_plaintext(self, key_id, + encryption_context=None, + key_spec=None, + number_of_bytes=None, + grant_tokens=None): + """ + Returns a key wrapped by a customer master key without the + plaintext copy of that key. To retrieve the plaintext, see + GenerateDataKey. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. 
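+            For example (a sketch; the wrapped key material is returned
+            base64-decoded under `CiphertextBlob`)::
+
+                wrapped = conn.generate_data_key_without_plaintext(
+                    'alias/example', key_spec='AES_256')['CiphertextBlob']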
+ + :type encryption_context: map + :param encryption_context: Name:value pair that contains additional + data to be authenticated during the encryption and decryption + processes. + + :type key_spec: string + :param key_spec: Value that identifies the encryption algorithm and key + size. Currently this can be AES_128 or AES_256. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. + + :type grant_tokens: list + :param grant_tokens: A list of grant tokens that represent grants which + can be used to provide long term permissions to generate a key. + + """ + params = {'KeyId': key_id, } + if encryption_context is not None: + params['EncryptionContext'] = encryption_context + if key_spec is not None: + params['KeySpec'] = key_spec + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + if grant_tokens is not None: + params['GrantTokens'] = grant_tokens + response = self.make_request(action='GenerateDataKeyWithoutPlaintext', + body=json.dumps(params)) + if response.get('CiphertextBlob') is not None: + response['CiphertextBlob'] = base64.b64decode( + response['CiphertextBlob'].encode('utf-8')) + return response + + def generate_random(self, number_of_bytes=None): + """ + Generates an unpredictable byte string. + + :type number_of_bytes: integer + :param number_of_bytes: Integer that contains the number of bytes to + generate. Common values are 128, 256, 512, 1024 and so on. The + current limit is 1024 bytes. + + """ + params = {} + if number_of_bytes is not None: + params['NumberOfBytes'] = number_of_bytes + response = self.make_request(action='GenerateRandom', + body=json.dumps(params)) + if response.get('Plaintext') is not None: + response['Plaintext'] = base64.b64decode( + response['Plaintext'].encode('utf-8')) + return response + + def get_key_policy(self, key_id, policy_name): + """ + Retrieves a policy attached to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: String that contains the name of the policy. + Currently, this must be "default". Policy names can be discovered + by calling ListKeyPolicies. + + """ + params = {'KeyId': key_id, 'PolicyName': policy_name, } + return self.make_request(action='GetKeyPolicy', + body=json.dumps(params)) + + def get_key_rotation_status(self, key_id): + """ + Retrieves a Boolean value that indicates whether key rotation + is enabled for the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + """ + params = {'KeyId': key_id, } + return self.make_request(action='GetKeyRotationStatus', + body=json.dumps(params)) + + def list_aliases(self, limit=None, marker=None): + """ + Lists all of the key aliases in the account. + + :type limit: integer + :param limit: Specify this parameter when paginating results to + indicate the maximum number of aliases you want in each response. + If there are additional aliases beyond the maximum you specify, the + `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter when paginating results, and only in + a subsequent request after you've received a response where the + results are truncated. 
Set it to the value of the `NextMarker` + element in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListAliases', + body=json.dumps(params)) + + def list_grants(self, key_id, limit=None, marker=None): + """ + List the grants for a specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of grants you want listed in the + response. If there are additional grants beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListGrants', + body=json.dumps(params)) + + def list_key_policies(self, key_id, limit=None, marker=None): + """ + Retrieves a list of policies attached to a key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of policies you want listed in the + response. If there are additional policies beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {'KeyId': key_id, } + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeyPolicies', + body=json.dumps(params)) + + def list_keys(self, limit=None, marker=None): + """ + Lists the customer master keys. + + :type limit: integer + :param limit: Specify this parameter only when paginating results to + indicate the maximum number of keys you want listed in the + response. If there are additional keys beyond the maximum you + specify, the `Truncated` response element will be set to `true.` + + :type marker: string + :param marker: Use this parameter only when paginating results, and + only in a subsequent request after you've received a response where + the results are truncated. Set it to the value of the `NextMarker` + in the response you just received. + + """ + params = {} + if limit is not None: + params['Limit'] = limit + if marker is not None: + params['Marker'] = marker + return self.make_request(action='ListKeys', + body=json.dumps(params)) + + def put_key_policy(self, key_id, policy_name, policy): + """ + Attaches a policy to the specified key. + + :type key_id: string + :param key_id: Unique identifier of the key. This can be an ARN, an + alias, or a globally unique identifier. + + :type policy_name: string + :param policy_name: Name of the policy to be attached. 
Currently, the
+            only supported name is "default".
+
+        :type policy: string
+        :param policy: The policy, in JSON format, to be attached to the key.
+
+        """
+        params = {
+            'KeyId': key_id,
+            'PolicyName': policy_name,
+            'Policy': policy,
+        }
+        return self.make_request(action='PutKeyPolicy',
+                                 body=json.dumps(params))
+
+    def re_encrypt(self, ciphertext_blob, destination_key_id,
+                   source_encryption_context=None,
+                   destination_encryption_context=None, grant_tokens=None):
+        """
+        Encrypts data on the server side with a new customer master
+        key without exposing the plaintext of the data on the client
+        side. The data is first decrypted and then encrypted. This
+        operation can also be used to change the encryption context of
+        a ciphertext.
+
+        :type ciphertext_blob: blob
+        :param ciphertext_blob: Ciphertext of the data to re-encrypt.
+
+        :type source_encryption_context: map
+        :param source_encryption_context: Encryption context used to encrypt
+            and decrypt the data specified in the `CiphertextBlob` parameter.
+
+        :type destination_key_id: string
+        :param destination_key_id: Key identifier of the key used to re-encrypt
+            the data.
+
+        :type destination_encryption_context: map
+        :param destination_encryption_context: Encryption context to be used
+            when the data is re-encrypted.
+
+        :type grant_tokens: list
+        :param grant_tokens: Grant tokens that identify the grants that have
+            permissions for the encryption and decryption process.
+
+        """
+        if not isinstance(ciphertext_blob, six.binary_type):
+            raise TypeError(
+                "Value of argument ``ciphertext_blob`` "
+                "must be of type %s." % six.binary_type)
+        ciphertext_blob = base64.b64encode(ciphertext_blob)
+        params = {
+            # Decode the base64 bytes to text so the JSON body is
+            # serializable on Python 3 (matches encrypt/decrypt above).
+            'CiphertextBlob': ciphertext_blob.decode('utf-8'),
+            'DestinationKeyId': destination_key_id,
+        }
+        if source_encryption_context is not None:
+            params['SourceEncryptionContext'] = source_encryption_context
+        if destination_encryption_context is not None:
+            params['DestinationEncryptionContext'] = destination_encryption_context
+        if grant_tokens is not None:
+            params['GrantTokens'] = grant_tokens
+        response = self.make_request(action='ReEncrypt',
+                                     body=json.dumps(params))
+        if response.get('CiphertextBlob') is not None:
+            response['CiphertextBlob'] = base64.b64decode(
+                response['CiphertextBlob'].encode('utf-8'))
+        return response
+
+    def retire_grant(self, grant_token):
+        """
+        Retires a grant. You can retire a grant when you're done using
+        it, to clean up. You should revoke a grant when you intend to
+        actively deny operations that depend on it.
+
+        :type grant_token: string
+        :param grant_token: Token that identifies the grant to be retired.
+
+        """
+        params = {'GrantToken': grant_token, }
+        return self.make_request(action='RetireGrant',
+                                 body=json.dumps(params))
+
+    def revoke_grant(self, key_id, grant_id):
+        """
+        Revokes a grant. You can revoke a grant to actively deny
+        operations that depend on it.
+
+        :type key_id: string
+        :param key_id: Unique identifier of the key associated with the grant.
+
+        :type grant_id: string
+        :param grant_id: Identifier of the grant to be revoked.
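+            For example (a sketch; the identifier would come from a prior
+            `create_grant` or `list_grants` response)::
+
+                conn.revoke_grant(key_id, grant['GrantId'])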
+
+        """
+        params = {'KeyId': key_id, 'GrantId': grant_id, }
+        return self.make_request(action='RevokeGrant',
+                                 body=json.dumps(params))
+
+    def update_key_description(self, key_id, description):
+        """
+        Updates the description of the specified customer master key.
+
+        :type key_id: string
+        :param key_id: Unique identifier of the customer master key to be
+            updated. This can be an ARN, an alias, or a globally unique
+            identifier.
+
+        :type description: string
+        :param description: New description for the key.
+
+        """
+        params = {'KeyId': key_id, 'Description': description, }
+        return self.make_request(action='UpdateKeyDescription',
+                                 body=json.dumps(params))
+
+    def make_request(self, action, body):
+        headers = {
+            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+            'Host': self.region.endpoint,
+            'Content-Type': 'application/x-amz-json-1.1',
+            'Content-Length': str(len(body)),
+        }
+        http_request = self.build_base_http_request(
+            method='POST', path='/', auth_path='/', params={},
+            headers=headers, data=body)
+        response = self._mexe(http_request, sender=None,
+                              override_num_retries=10)
+        response_body = response.read().decode('utf-8')
+        boto.log.debug(response_body)
+        if response.status == 200:
+            if response_body:
+                return json.loads(response_body)
+        else:
+            json_body = json.loads(response_body)
+            fault_name = json_body.get('__type', None)
+            exception_class = self._faults.get(fault_name, self.ResponseError)
+            raise exception_class(response.status, response.reason,
+                                  body=json_body)
+
diff --git a/ext/boto/logs/__init__.py b/ext/boto/logs/__init__.py
new file mode 100644
index 0000000000..a060aaf646
--- /dev/null
+++ b/ext/boto/logs/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import get_regions
+from boto.regioninfo import connect
+
+
+def regions():
+    """
+    Get all available regions for the CloudWatch Logs service.
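+
+    Example (a minimal sketch)::
+
+        import boto.logs
+        print([r.name for r in boto.logs.regions()])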
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.logs.layer1 import CloudWatchLogsConnection + return get_regions('logs', connection_cls=CloudWatchLogsConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.logs.layer1 import CloudWatchLogsConnection + return connect('logs', region_name, + connection_cls=CloudWatchLogsConnection, **kw_params) diff --git a/ext/boto/logs/exceptions.py b/ext/boto/logs/exceptions.py new file mode 100644 index 0000000000..49c01fa91c --- /dev/null +++ b/ext/boto/logs/exceptions.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class LimitExceededException(BotoServerError): + pass + + +class DataAlreadyAcceptedException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ServiceUnavailableException(BotoServerError): + pass + + +class InvalidParameterException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class ResourceAlreadyExistsException(BotoServerError): + pass + + +class OperationAbortedException(BotoServerError): + pass + + +class InvalidSequenceTokenException(BotoServerError): + pass diff --git a/ext/boto/logs/layer1.py b/ext/boto/logs/layer1.py new file mode 100644 index 0000000000..26f7aff7cd --- /dev/null +++ b/ext/boto/logs/layer1.py @@ -0,0 +1,576 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.logs import exceptions +from boto.compat import json + + +class CloudWatchLogsConnection(AWSQueryConnection): + """ + Amazon CloudWatch Logs Service API Reference + This is the Amazon CloudWatch Logs API Reference . Amazon + CloudWatch Logs is a managed service for real time monitoring and + archival of application logs. This guide provides detailed + information about Amazon CloudWatch Logs actions, data types, + parameters, and errors. For detailed information about Amazon + CloudWatch Logs features and their associated API calls, go to the + `Amazon CloudWatch Logs Developer Guide`_. + + Use the following links to get started using the Amazon CloudWatch + API Reference : + + + + `Actions`_: An alphabetical list of all Amazon CloudWatch Logs + actions. + + `Data Types`_: An alphabetical list of all Amazon CloudWatch + Logs data types. + + `Common Parameters`_: Parameters that all Query actions can use. + + `Common Errors`_: Client and server errors that all actions can + return. + + `Regions and Endpoints`_: Itemized regions and endpoints for all + AWS products. + + + In addition to using the Amazon CloudWatch Logs API, you can also + use the following SDKs and third-party libraries to access Amazon + CloudWatch Logs programmatically. + + + + `AWS SDK for Java Documentation`_ + + `AWS SDK for .NET Documentation`_ + + `AWS SDK for PHP Documentation`_ + + `AWS SDK for Ruby Documentation`_ + + + Developers in the AWS developer community also provide their own + libraries, which you can find at the following AWS developer + centers: + + + + `AWS Java Developer Center`_ + + `AWS PHP Developer Center`_ + + `AWS Python Developer Center`_ + + `AWS Ruby Developer Center`_ + + `AWS Windows and .NET Developer Center`_ + """ + APIVersion = "2014-03-28" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com" + ServiceName = "CloudWatchLogs" + TargetPrefix = "Logs_20140328" + ResponseError = JSONResponseError + + _faults = { + "LimitExceededException": exceptions.LimitExceededException, + "DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException, + "ResourceInUseException": exceptions.ResourceInUseException, + "ServiceUnavailableException": exceptions.ServiceUnavailableException, + "InvalidParameterException": exceptions.InvalidParameterException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException, + "OperationAbortedException": exceptions.OperationAbortedException, + "InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException, + } + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(CloudWatchLogsConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_log_group(self, log_group_name): + """ + Creates a new log group with the specified name. 
The name of
+        the log group must be unique within a region for an AWS
+        account. You can create up to 100 log groups per account.
+
+        You must use the following guidelines when naming a log group:
+
+        + Log group names can be between 1 and 512 characters long.
+        + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
+          (hyphen), '/' (forward slash), and '.' (period).
+
+
+        Log groups are created with a default retention of 14 days.
+        The retention attribute allows you to configure the number of
+        days you want to retain log events in the specified log group.
+        See the `SetRetention` operation for how to modify the
+        retention of your log groups.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group to create.
+
+        """
+        params = {'logGroupName': log_group_name, }
+        return self.make_request(action='CreateLogGroup',
+                                 body=json.dumps(params))
+
+    def create_log_stream(self, log_group_name, log_stream_name):
+        """
+        Creates a new log stream in the specified log group. The name
+        of the log stream must be unique within the log group. There
+        is no limit on the number of log streams that can exist in a
+        log group.
+
+        You must use the following guidelines when naming a log
+        stream:
+
+        + Log stream names can be between 1 and 512 characters long.
+        + The ':' (colon) character is not allowed.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type log_stream_name: string
+        :param log_stream_name: The name of the log stream to create.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'logStreamName': log_stream_name,
+        }
+        return self.make_request(action='CreateLogStream',
+                                 body=json.dumps(params))
+
+    def delete_log_group(self, log_group_name):
+        """
+        Deletes the log group with the specified name. Amazon
+        CloudWatch Logs will delete a log group only if there are no
+        log streams and no metric filters associated with the log
+        group. If this condition is not satisfied, the request will
+        fail and the log group will not be deleted.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group to delete.
+
+        """
+        params = {'logGroupName': log_group_name, }
+        return self.make_request(action='DeleteLogGroup',
+                                 body=json.dumps(params))
+
+    def delete_log_stream(self, log_group_name, log_stream_name):
+        """
+        Deletes a log stream and permanently deletes all the archived
+        log events associated with it.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type log_stream_name: string
+        :param log_stream_name: The name of the log stream to delete.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'logStreamName': log_stream_name,
+        }
+        return self.make_request(action='DeleteLogStream',
+                                 body=json.dumps(params))
+
+    def delete_metric_filter(self, log_group_name, filter_name):
+        """
+        Deletes a metric filter associated with the specified log
+        group.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type filter_name: string
+        :param filter_name: The name of the metric filter.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'filterName': filter_name,
+        }
+        return self.make_request(action='DeleteMetricFilter',
+                                 body=json.dumps(params))
+
+    def delete_retention_policy(self, log_group_name):
+        """
+        Deletes the retention policy of the specified log group, so
+        that log events in it are retained indefinitely.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        """
+        params = {'logGroupName': log_group_name, }
+        return self.make_request(action='DeleteRetentionPolicy',
+                                 body=json.dumps(params))
+
+    def describe_log_groups(self, log_group_name_prefix=None,
+                            next_token=None, limit=None):
+        """
+        Returns all the log groups that are associated with the AWS
+        account making the request.
The list returned in the response + is ASCII-sorted by log group name. + + By default, this operation returns up to 50 log groups. If + there are more log groups to list, the response would contain + a `nextToken` value in the response body. You can also limit + the number of log groups returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name_prefix: string + :param log_group_name_prefix: + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeLogGroups` request. + + :type limit: integer + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the request would return up to 50 items. + + """ + params = {} + if log_group_name_prefix is not None: + params['logGroupNamePrefix'] = log_group_name_prefix + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + return self.make_request(action='DescribeLogGroups', + body=json.dumps(params)) + + def describe_log_streams(self, log_group_name, + log_stream_name_prefix=None, next_token=None, + limit=None): + """ + Returns all the log streams that are associated with the + specified log group. The list returned in the response is + ASCII-sorted by log stream name. + + By default, this operation returns up to 50 log streams. If + there are more log streams to list, the response would contain + a `nextToken` value in the response body. You can also limit + the number of log streams returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name: string + :param log_group_name: + + :type log_stream_name_prefix: string + :param log_stream_name_prefix: + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeLogStreams` request. + + :type limit: integer + :param limit: The maximum number of items returned in the response. If + you don't specify a value, the request would return up to 50 items. + + """ + params = {'logGroupName': log_group_name, } + if log_stream_name_prefix is not None: + params['logStreamNamePrefix'] = log_stream_name_prefix + if next_token is not None: + params['nextToken'] = next_token + if limit is not None: + params['limit'] = limit + return self.make_request(action='DescribeLogStreams', + body=json.dumps(params)) + + def describe_metric_filters(self, log_group_name, + filter_name_prefix=None, next_token=None, + limit=None): + """ + Returns all the metrics filters associated with the specified + log group. The list returned in the response is ASCII-sorted + by filter name. + + By default, this operation returns up to 50 metric filters. If + there are more metric filters to list, the response would + contain a `nextToken` value in the response body. You can also + limit the number of metric filters returned in the response by + specifying the `limit` parameter in the request. + + :type log_group_name: string + :param log_group_name: + + :type filter_name_prefix: string + :param filter_name_prefix: The name of the metric filter. + + :type next_token: string + :param next_token: A string token used for pagination that points to + the next page of results. It must be a value obtained from the + response of the previous `DescribeMetricFilters` request. 
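+            A simple pagination loop might read as follows (a sketch,
+            assuming the service's `metricFilters` and `nextToken`
+            response keys)::
+
+                token, filters = None, []
+                while True:
+                    page = conn.describe_metric_filters('my-group',
+                                                        next_token=token)
+                    filters.extend(page.get('metricFilters', []))
+                    token = page.get('nextToken')
+                    if token is None:
+                        break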
+
+        :type limit: integer
+        :param limit: The maximum number of items returned in the response. If
+            you don't specify a value, the request would return up to 50 items.
+
+        """
+        params = {'logGroupName': log_group_name, }
+        if filter_name_prefix is not None:
+            params['filterNamePrefix'] = filter_name_prefix
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if limit is not None:
+            params['limit'] = limit
+        return self.make_request(action='DescribeMetricFilters',
+                                 body=json.dumps(params))
+
+    def get_log_events(self, log_group_name, log_stream_name,
+                       start_time=None, end_time=None, next_token=None,
+                       limit=None, start_from_head=None):
+        """
+        Retrieves log events from the specified log stream. You can
+        provide an optional time range to filter the results on the
+        event `timestamp`.
+
+        By default, this operation returns as many log events as can
+        fit in a response size of 1MB, up to 10,000 log events. The
+        response will always include a `nextForwardToken` and a
+        `nextBackwardToken` in the response body. You can use any of
+        these tokens in subsequent `GetLogEvents` requests to paginate
+        through events in either forward or backward direction. You
+        can also limit the number of log events returned in the
+        response by specifying the `limit` parameter in the request.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type log_stream_name: string
+        :param log_stream_name: The name of the log stream.
+
+        :type start_time: long
+        :param start_time: A point in time expressed as the number of
+            milliseconds since Jan 1, 1970 00:00:00 UTC.
+
+        :type end_time: long
+        :param end_time: A point in time expressed as the number of
+            milliseconds since Jan 1, 1970 00:00:00 UTC.
+
+        :type next_token: string
+        :param next_token: A string token used for pagination that points to
+            the next page of results. It must be a value obtained from the
+            `nextForwardToken` or `nextBackwardToken` fields in the response of
+            the previous `GetLogEvents` request.
+
+        :type limit: integer
+        :param limit: The maximum number of log events returned in the
+            response. If you don't specify a value, the request would return as
+            many log events as can fit in a response size of 1MB, up to 10,000
+            log events.
+
+        :type start_from_head: boolean
+        :param start_from_head: If set to `True`, the earliest log events are
+            returned first; otherwise the latest log events are returned first.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'logStreamName': log_stream_name,
+        }
+        if start_time is not None:
+            params['startTime'] = start_time
+        if end_time is not None:
+            params['endTime'] = end_time
+        if next_token is not None:
+            params['nextToken'] = next_token
+        if limit is not None:
+            params['limit'] = limit
+        if start_from_head is not None:
+            params['startFromHead'] = start_from_head
+        return self.make_request(action='GetLogEvents',
+                                 body=json.dumps(params))
+
+    def put_log_events(self, log_group_name, log_stream_name, log_events,
+                       sequence_token=None):
+        """
+        Uploads a batch of log events to the specified log stream.
+
+        Every PutLogEvents request must include the `sequenceToken`
+        obtained from the response of the previous request. An upload
+        in a newly created log stream does not require a
+        `sequenceToken`.
+
+        The batch of events must satisfy the following constraints:
+
+        + The maximum batch size is 32,768 bytes, and this size is
+          calculated as the sum of all event messages in UTF-8, plus 26
+          bytes for each log event.
+        + None of the log events in the batch can be more than 2 hours
+          in the future.
+        + None of the log events in the batch can be older than 14
+          days or the retention period of the log group.
+        + The log events in the batch must be in chronological order
+          by their `timestamp`.
+        + The maximum number of log events in a batch is 1,000.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type log_stream_name: string
+        :param log_stream_name: The name of the log stream.
+
+        :type log_events: list
+        :param log_events: A list of events belonging to a log stream.
+
+        :type sequence_token: string
+        :param sequence_token: A string token that must be obtained from the
+            response of the previous `PutLogEvents` request.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'logStreamName': log_stream_name,
+            'logEvents': log_events,
+        }
+        if sequence_token is not None:
+            params['sequenceToken'] = sequence_token
+        return self.make_request(action='PutLogEvents',
+                                 body=json.dumps(params))
+
+    def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
+                          metric_transformations):
+        """
+        Creates or updates a metric filter and associates it with the
+        specified log group. Metric filters allow you to configure
+        rules to extract metric data from log events ingested through
+        `PutLogEvents` requests.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type filter_name: string
+        :param filter_name: The name of the metric filter.
+
+        :type filter_pattern: string
+        :param filter_pattern: The pattern used to extract metric data from
+            matching log events.
+
+        :type metric_transformations: list
+        :param metric_transformations: A list of metric transformation
+            definitions describing the metric data to emit.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'filterName': filter_name,
+            'filterPattern': filter_pattern,
+            'metricTransformations': metric_transformations,
+        }
+        return self.make_request(action='PutMetricFilter',
+                                 body=json.dumps(params))
+
+    def put_retention_policy(self, log_group_name, retention_in_days):
+        """
+        Sets the retention policy of the specified log group. A
+        retention policy configures the number of days you want to
+        retain log events in the specified log group.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type retention_in_days: integer
+        :param retention_in_days: Specifies the number of days you want to
+            retain log events in the specified log group. Possible values are:
+            1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'retentionInDays': retention_in_days,
+        }
+        return self.make_request(action='PutRetentionPolicy',
+                                 body=json.dumps(params))
+
+    def set_retention(self, log_group_name, retention_in_days):
+        """
+        Sets the retention of the specified log group. Log groups are
+        created with a default retention of 14 days. The retention
+        attribute allows you to configure the number of days you want
+        to retain log events in the specified log group.
+
+        :type log_group_name: string
+        :param log_group_name: The name of the log group.
+
+        :type retention_in_days: integer
+        :param retention_in_days: Specifies the number of days you want to
+            retain log events in the specified log group. Possible values are:
+            1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.
+
+        """
+        params = {
+            'logGroupName': log_group_name,
+            'retentionInDays': retention_in_days,
+        }
+        return self.make_request(action='SetRetention',
+                                 body=json.dumps(params))
+
+    def test_metric_filter(self, filter_pattern, log_event_messages):
+        """
+        Tests the filter pattern of a metric filter against a sample
+        of log event messages. You can use this operation to validate
+        the correctness of a metric filter pattern.
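+
+        Example (a sketch; a bare term such as `ERROR` is a valid filter
+        pattern and matches any message containing that term)::
+
+            result = conn.test_metric_filter(
+                'ERROR', ['ERROR disk full', 'INFO all good'])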
+ + :type filter_pattern: string + :param filter_pattern: + + :type log_event_messages: list + :param log_event_messages: + + """ + params = { + 'filterPattern': filter_pattern, + 'logEventMessages': log_event_messages, + } + return self.make_request(action='TestMetricFilter', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/machinelearning/__init__.py b/ext/boto/machinelearning/__init__.py new file mode 100644 index 0000000000..6e3ac5930e --- /dev/null +++ b/ext/boto/machinelearning/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Machine Learning. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.machinelearning.layer1 import MachineLearningConnection + return get_regions('machinelearning', + connection_cls=MachineLearningConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.machinelearning.layer1 import MachineLearningConnection + return connect('machinelearning', region_name, + connection_cls=MachineLearningConnection, **kw_params) diff --git a/ext/boto/machinelearning/exceptions.py b/ext/boto/machinelearning/exceptions.py new file mode 100644 index 0000000000..17f396fdc9 --- /dev/null +++ b/ext/boto/machinelearning/exceptions.py @@ -0,0 +1,51 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.exception import BotoServerError + + +class InternalServerException(BotoServerError): + pass + + +class LimitExceededException(BotoServerError): + pass + + +class IdempotentParameterMismatchException(BotoServerError): + pass + + +class ResourceInUseException(BotoServerError): + pass + + +class ResourceNotFoundException(BotoServerError): + pass + + +class PredictorNotMountedException(BotoServerError): + pass + + +class InvalidInputException(BotoServerError): + pass diff --git a/ext/boto/machinelearning/layer1.py b/ext/boto/machinelearning/layer1.py new file mode 100644 index 0000000000..0768fb1ea9 --- /dev/null +++ b/ext/boto/machinelearning/layer1.py @@ -0,0 +1,1408 @@ +# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# + +import boto +from boto.compat import json, urlsplit +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.machinelearning import exceptions + + +class MachineLearningConnection(AWSQueryConnection): + """ + Definition of the public APIs exposed by Amazon Machine Learning + """ + APIVersion = "2014-12-12" + AuthServiceName = 'machinelearning' + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "machinelearning.us-east-1.amazonaws.com" + ServiceName = "MachineLearning" + TargetPrefix = "AmazonML_20141212" + ResponseError = JSONResponseError + + _faults = { + "InternalServerException": exceptions.InternalServerException, + "LimitExceededException": exceptions.LimitExceededException, + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "IdempotentParameterMismatchException": exceptions.IdempotentParameterMismatchException, + "PredictorNotMountedException": exceptions.PredictorNotMountedException, + "InvalidInputException": exceptions.InvalidInputException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(MachineLearningConnection, self).__init__(**kwargs) + self.region = region + self.auth_region_name = self.region.name + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_batch_prediction(self, batch_prediction_id, ml_model_id, + batch_prediction_data_source_id, output_uri, + batch_prediction_name=None): + """ + Generates predictions for a group of observations. The + observations to process exist in one or more data files + referenced by a `DataSource`. This operation creates a new + `BatchPrediction`, and uses an `MLModel` and the data files + referenced by the `DataSource` as information sources. + + `CreateBatchPrediction` is an asynchronous operation. In + response to `CreateBatchPrediction`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `BatchPrediction` + status to `PENDING`. After the `BatchPrediction` completes, + Amazon ML sets the status to `COMPLETED`. + + You can poll for status updates by using the + GetBatchPrediction operation and checking the `Status` + parameter of the result. After the `COMPLETED` status appears, + the results are available in the location specified by the + `OutputUri` parameter. + + :type batch_prediction_id: string + :param batch_prediction_id: A user-supplied ID that uniquely identifies + the `BatchPrediction`. + + :type batch_prediction_name: string + :param batch_prediction_name: A user-supplied name or description of + the `BatchPrediction`. `BatchPredictionName` can only use the UTF-8 + character set. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` that will generate + predictions for the group of observations. + + :type batch_prediction_data_source_id: string + :param batch_prediction_data_source_id: The ID of the `DataSource` that + points to the group of observations to predict. + + :type output_uri: string + :param output_uri: The location of an Amazon Simple Storage Service + (Amazon S3) bucket or directory to store the batch prediction + results. The following substrings are not allowed in the s3 key + portion of the "outputURI" field: ':', '//', '/./', '/../'. 
+ Amazon ML needs permissions to store and retrieve the logs on your + behalf. For information about how to set permissions, see the + `Amazon Machine Learning Developer Guide`_. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'MLModelId': ml_model_id, + 'BatchPredictionDataSourceId': batch_prediction_data_source_id, + 'OutputUri': output_uri, + } + if batch_prediction_name is not None: + params['BatchPredictionName'] = batch_prediction_name + return self.make_request(action='CreateBatchPrediction', + body=json.dumps(params)) + + def create_data_source_from_rds(self, data_source_id, rds_data, role_arn, + data_source_name=None, + compute_statistics=None): + """ + Creates a `DataSource` object from an ` Amazon Relational + Database Service`_ (Amazon RDS). A `DataSource` references + data that can be used to perform CreateMLModel, + CreateEvaluation, or CreateBatchPrediction operations. + + `CreateDataSourceFromRDS` is an asynchronous operation. In + response to `CreateDataSourceFromRDS`, Amazon Machine Learning + (Amazon ML) immediately returns and sets the `DataSource` + status to `PENDING`. After the `DataSource` is created and + ready for use, Amazon ML sets the `Status` parameter to + `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status + can only be used to perform CreateMLModel, CreateEvaluation, + or CreateBatchPrediction operations. + + If Amazon ML cannot accept the input source, it sets the + `Status` parameter to `FAILED` and includes an error message + in the `Message` attribute of the GetDataSource operation + response. + + :type data_source_id: string + :param data_source_id: A user-supplied ID that uniquely identifies the + `DataSource`. Typically, an Amazon Resource Number (ARN) becomes + the ID for a `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type rds_data: dict + :param rds_data: + The data specification of an Amazon RDS `DataSource`: + + + + DatabaseInformation - + + + `DatabaseName ` - Name of the Amazon RDS database. + + ` InstanceIdentifier ` - Unique identifier for the Amazon RDS + database instance. + + + DatabaseCredentials - AWS Identity and Access Management (IAM) + credentials that are used to connect to the Amazon RDS database. + + ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by an + Amazon Elastic Compute Cloud (EC2) instance to carry out the copy + task from Amazon RDS to Amazon S3. For more information, see `Role + templates`_ for data pipelines. + + ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS Data + Pipeline service to monitor the progress of the copy task from + Amazon RDS to Amazon Simple Storage Service (S3). For more + information, see `Role templates`_ for data pipelines. + + SecurityInfo - Security information to use to access an Amazon RDS + instance. You need to set up appropriate ingress rules for the + security entity IDs provided to allow access to the Amazon RDS + instance. Specify a [ `SubnetId`, `SecurityGroupIds`] pair for a + VPC-based Amazon RDS instance. + + SelectSqlQuery - Query that is used to retrieve the observation data + for the `Datasource`. + + S3StagingLocation - Amazon S3 location for staging RDS data. The data + retrieved from Amazon RDS using `SelectSqlQuery` is stored in this + location. + + DataSchemaUri - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. 
+        + DataRearrangement - A JSON string representing the splitting
+          requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some-
+          random-seed\",
+          \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`
+
+        :type role_arn: string
+        :param role_arn: The role that Amazon ML assumes on behalf of the user
+            to create and activate a data pipeline in the user's account and
+            copy data (using the `SelectSqlQuery` query) from Amazon RDS to
+            Amazon S3.
+
+        :type compute_statistics: boolean
+        :param compute_statistics: The compute statistics for a `DataSource`.
+            The statistics are generated from the observation data referenced
+            by a `DataSource`. Amazon ML uses the statistics internally during
+            an `MLModel` training. This parameter must be set to `True` if the
+            ``DataSource`` needs to be used for `MLModel` training.
+
+        """
+        params = {
+            'DataSourceId': data_source_id,
+            'RDSData': rds_data,
+            'RoleARN': role_arn,
+        }
+        if data_source_name is not None:
+            params['DataSourceName'] = data_source_name
+        if compute_statistics is not None:
+            params['ComputeStatistics'] = compute_statistics
+        return self.make_request(action='CreateDataSourceFromRDS',
+                                 body=json.dumps(params))
+
+    def create_data_source_from_redshift(self, data_source_id, data_spec,
+                                         role_arn, data_source_name=None,
+                                         compute_statistics=None):
+        """
+        Creates a `DataSource` from `Amazon Redshift`_. A `DataSource`
+        references data that can be used to perform either
+        CreateMLModel, CreateEvaluation or CreateBatchPrediction
+        operations.
+
+        `CreateDataSourceFromRedshift` is an asynchronous operation.
+        In response to `CreateDataSourceFromRedshift`, Amazon Machine
+        Learning (Amazon ML) immediately returns and sets the
+        `DataSource` status to `PENDING`. After the `DataSource` is
+        created and ready for use, Amazon ML sets the `Status`
+        parameter to `COMPLETED`. `DataSource` in `COMPLETED` or
+        `PENDING` status can only be used to perform CreateMLModel,
+        CreateEvaluation, or CreateBatchPrediction operations.
+
+        If Amazon ML cannot accept the input source, it sets the
+        `Status` parameter to `FAILED` and includes an error message
+        in the `Message` attribute of the GetDataSource operation
+        response.
+
+        The observations should exist in the database hosted on an
+        Amazon Redshift cluster and should be specified by a
+        `SelectSqlQuery`. Amazon ML executes the `Unload`_ command in
+        Amazon Redshift to transfer the result set of `SelectSqlQuery`
+        to `S3StagingLocation`.
+
+        After the `DataSource` is created, it's ready for use in
+        evaluations and batch predictions. If you plan to use the
+        `DataSource` to train an `MLModel`, the `DataSource` requires
+        another item -- a recipe. A recipe describes the observation
+        variables that participate in training an `MLModel`. A recipe
+        describes how each input variable will be used in training.
+        Will the variable be included or excluded from training? Will
+        the variable be manipulated, for example, combined with
+        another variable or split apart into word combinations? The
+        recipe provides answers to these questions. For more
+        information, see the Amazon Machine Learning Developer Guide.
+
+        :type data_source_id: string
+        :param data_source_id: A user-supplied ID that uniquely identifies the
+            `DataSource`.
+
+        :type data_source_name: string
+        :param data_source_name: A user-supplied name or description of the
+            `DataSource`.
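+
+        A call might look like the following sketch, where `redshift_spec`
+        is a dict with the keys described under `data_spec` below and the
+        role ARN is a placeholder::
+
+            conn.create_data_source_from_redshift(
+                'my-redshift-ds', redshift_spec,
+                'arn:aws:iam::111122223333:role/AmazonMLRole')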
+
+        :type data_spec: dict
+        :param data_spec:
+        The data specification of an Amazon Redshift `DataSource`:
+
+
+        + DatabaseInformation -
+
+            + `DatabaseName ` - Name of the Amazon Redshift database.
+            + ` ClusterIdentifier ` - Unique ID for the Amazon Redshift cluster.
+
+
+        + DatabaseCredentials - AWS Identity and Access Management (IAM)
+              credentials that are used to connect to the Amazon Redshift
+              database.
+        + SelectSqlQuery - Query that is used to retrieve the observation data
+              for the `Datasource`.
+        + S3StagingLocation - Amazon Simple Storage Service (Amazon S3)
+              location for staging Amazon Redshift data. The data retrieved
+              from Amazon Redshift using `SelectSqlQuery` is stored in this
+              location.
+        + DataSchemaUri - Amazon S3 location of the `DataSchema`.
+        + DataSchema - A JSON string representing the schema. This is not
+              required if `DataSchemaUri` is specified.
+        + DataRearrangement - A JSON string representing the splitting
+              requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some-
+              random-seed\",
+              \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`
+
+        :type role_arn: string
+        :param role_arn: A fully specified role Amazon Resource Name (ARN).
+            Amazon ML assumes the role on behalf of the user to create the
+            following:
+
+
+        + A security group to allow Amazon ML to execute the `SelectSqlQuery`
+              query on an Amazon Redshift cluster
+        + An Amazon S3 bucket policy to grant Amazon ML read/write permissions
+              on the `S3StagingLocation`
+
+        :type compute_statistics: boolean
+        :param compute_statistics: The compute statistics for a `DataSource`.
+            The statistics are generated from the observation data referenced
+            by a `DataSource`. Amazon ML uses the statistics internally during
+            `MLModel` training. This parameter must be set to `True` if the
+            ``DataSource `` needs to be used for `MLModel` training.
+
+        """
+        params = {
+            'DataSourceId': data_source_id,
+            'DataSpec': data_spec,
+            'RoleARN': role_arn,
+        }
+        if data_source_name is not None:
+            params['DataSourceName'] = data_source_name
+        if compute_statistics is not None:
+            params['ComputeStatistics'] = compute_statistics
+        return self.make_request(action='CreateDataSourceFromRedshift',
+                                 body=json.dumps(params))
+
+    def create_data_source_from_s3(self, data_source_id, data_spec,
+                                   data_source_name=None,
+                                   compute_statistics=None):
+        """
+        Creates a `DataSource` object. A `DataSource` references data
+        that can be used to perform CreateMLModel, CreateEvaluation,
+        or CreateBatchPrediction operations.
+
+        `CreateDataSourceFromS3` is an asynchronous operation. In
+        response to `CreateDataSourceFromS3`, Amazon Machine Learning
+        (Amazon ML) immediately returns and sets the `DataSource`
+        status to `PENDING`. After the `DataSource` is created and
+        ready for use, Amazon ML sets the `Status` parameter to
+        `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status
+        can only be used to perform CreateMLModel, CreateEvaluation or
+        CreateBatchPrediction operations.
+
+        If Amazon ML cannot accept the input source, it sets the
+        `Status` parameter to `FAILED` and includes an error message
+        in the `Message` attribute of the GetDataSource operation
+        response.
+
+        The observation data used in a `DataSource` should be ready to
+        use; that is, it should have a consistent structure, and
+        missing data values should be kept to a minimum.
The + observation data must reside in one or more CSV files in an + Amazon Simple Storage Service (Amazon S3) bucket, along with a + schema that describes the data items by name and type. The + same schema must be used for all of the data files referenced + by the `DataSource`. + + After the `DataSource` has been created, it's ready to use in + evaluations and batch predictions. If you plan to use the + `DataSource` to train an `MLModel`, the `DataSource` requires + another item: a recipe. A recipe describes the observation + variables that participate in training an `MLModel`. A recipe + describes how each input variable will be used in training. + Will the variable be included or excluded from training? Will + the variable be manipulated, for example, combined with + another variable, or split apart into word combinations? The + recipe provides answers to these questions. For more + information, see the `Amazon Machine Learning Developer + Guide`_. + + :type data_source_id: string + :param data_source_id: A user-supplied identifier that uniquely + identifies the `DataSource`. + + :type data_source_name: string + :param data_source_name: A user-supplied name or description of the + `DataSource`. + + :type data_spec: dict + :param data_spec: + The data specification of a `DataSource`: + + + + DataLocationS3 - Amazon Simple Storage Service (Amazon S3) location + of the observation data. + + DataSchemaLocationS3 - Amazon S3 location of the `DataSchema`. + + DataSchema - A JSON string representing the schema. This is not + required if `DataSchemaUri` is specified. + + DataRearrangement - A JSON string representing the splitting + requirement of a `Datasource`. Sample - ` "{\"randomSeed\":\"some- + random-seed\", + \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"` + + :type compute_statistics: boolean + :param compute_statistics: The compute statistics for a `DataSource`. + The statistics are generated from the observation data referenced + by a `DataSource`. Amazon ML uses the statistics internally during + an `MLModel` training. This parameter must be set to `True` if the + ``DataSource `` needs to be used for `MLModel` training + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSpec': data_spec, + } + if data_source_name is not None: + params['DataSourceName'] = data_source_name + if compute_statistics is not None: + params['ComputeStatistics'] = compute_statistics + return self.make_request(action='CreateDataSourceFromS3', + body=json.dumps(params)) + + def create_evaluation(self, evaluation_id, ml_model_id, + evaluation_data_source_id, evaluation_name=None): + """ + Creates a new `Evaluation` of an `MLModel`. An `MLModel` is + evaluated on a set of observations associated to a + `DataSource`. Like a `DataSource` for an `MLModel`, the + `DataSource` for an `Evaluation` contains values for the + Target Variable. The `Evaluation` compares the predicted + result for each observation to the actual outcome and provides + a summary so that you know how effective the `MLModel` + functions on the test data. Evaluation generates a relevant + performance metric such as BinaryAUC, RegressionRMSE or + MulticlassAvgFScore based on the corresponding `MLModelType`: + `BINARY`, `REGRESSION` or `MULTICLASS`. + + `CreateEvaluation` is an asynchronous operation. In response + to `CreateEvaluation`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the evaluation status to + `PENDING`. 
After the `Evaluation` is created and ready for + use, Amazon ML sets the status to `COMPLETED`. + + You can use the GetEvaluation operation to check progress of + the evaluation during the creation operation. + + :type evaluation_id: string + :param evaluation_id: A user-supplied ID that uniquely identifies the + `Evaluation`. + + :type evaluation_name: string + :param evaluation_name: A user-supplied name or description of the + `Evaluation`. + + :type ml_model_id: string + :param ml_model_id: The ID of the `MLModel` to evaluate. + The schema used in creating the `MLModel` must match the schema of the + `DataSource` used in the `Evaluation`. + + :type evaluation_data_source_id: string + :param evaluation_data_source_id: The ID of the `DataSource` for the + evaluation. The schema of the `DataSource` must match the schema + used to create the `MLModel`. + + """ + params = { + 'EvaluationId': evaluation_id, + 'MLModelId': ml_model_id, + 'EvaluationDataSourceId': evaluation_data_source_id, + } + if evaluation_name is not None: + params['EvaluationName'] = evaluation_name + return self.make_request(action='CreateEvaluation', + body=json.dumps(params)) + + def create_ml_model(self, ml_model_id, ml_model_type, + training_data_source_id, ml_model_name=None, + parameters=None, recipe=None, recipe_uri=None): + """ + Creates a new `MLModel` using the data files and the recipe as + information sources. + + An `MLModel` is nearly immutable. Users can only update the + `MLModelName` and the `ScoreThreshold` in an `MLModel` without + creating a new `MLModel`. + + `CreateMLModel` is an asynchronous operation. In response to + `CreateMLModel`, Amazon Machine Learning (Amazon ML) + immediately returns and sets the `MLModel` status to + `PENDING`. After the `MLModel` is created and ready for use, + Amazon ML sets the status to `COMPLETED`. + + You can use the GetMLModel operation to check progress of the + `MLModel` during the creation operation. + + CreateMLModel requires a `DataSource` with computed + statistics, which can be created by setting + `ComputeStatistics` to `True` in CreateDataSourceFromRDS, + CreateDataSourceFromS3, or CreateDataSourceFromRedshift + operations. + + :type ml_model_id: string + :param ml_model_id: A user-supplied ID that uniquely identifies the + `MLModel`. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type ml_model_type: string + :param ml_model_type: The category of supervised learning that this + `MLModel` will address. Choose from the following types: + + + Choose `REGRESSION` if the `MLModel` will be used to predict a + numeric value. + + Choose `BINARY` if the `MLModel` result has two possible values. + + Choose `MULTICLASS` if the `MLModel` result has a limited number of + values. + + + For more information, see the `Amazon Machine Learning Developer + Guide`_. + + :type parameters: map + :param parameters: + A list of the training parameters in the `MLModel`. The list is + implemented as a map of key/value pairs. + + The following is the current set of training parameters: + + + + `sgd.l1RegularizationAmount` - Coefficient regularization L1 norm. It + controls overfitting the data by penalizing large coefficients. + This tends to drive coefficients to zero, resulting in sparse + feature set. If you use this parameter, start by specifying a small + value such as 1.0E-08. The value is a double that ranges from 0 to + MAX_DOUBLE. The default is not to use L1 normalization. 
The
+            parameter cannot be used when `L2` is specified. Use this parameter
+            sparingly.
+
+        `sgd.l2RegularizationAmount` - Coefficient regularization L2 norm. It
+            controls overfitting the data by penalizing large coefficients.
+            This tends to drive coefficients to small, nonzero values. If you
+            use this parameter, start by specifying a small value such as
+            1.0E-08. The value is a double that ranges from 0 to MAX_DOUBLE.
+            The default is not to use L2 normalization. This cannot be used
+            when `L1` is specified. Use this parameter sparingly.
+
+        `sgd.maxPasses` - Number of times that the training process traverses
+            the observations to build the `MLModel`. The value is an integer
+            that ranges from 1 to 10000. The default value is 10.
+
+        `sgd.maxMLModelSizeInBytes` - Maximum allowed size of the model.
+            Depending on the input data, the size of the model might affect its
+            performance. The value is an integer that ranges from 100000 to
+            2147483648. The default value is 33554432.
+
+        :type training_data_source_id: string
+        :param training_data_source_id: The `DataSource` that points to the
+            training data.
+
+        :type recipe: string
+        :param recipe: The data recipe for creating `MLModel`. You must specify
+            either the recipe or its URI. If you don't specify a recipe or its
+            URI, Amazon ML creates a default.
+
+        :type recipe_uri: string
+        :param recipe_uri: The Amazon Simple Storage Service (Amazon S3)
+            location and file name that contains the `MLModel` recipe. You must
+            specify either the recipe or its URI. If you don't specify a recipe
+            or its URI, Amazon ML creates a default.
+
+        """
+        params = {
+            'MLModelId': ml_model_id,
+            'MLModelType': ml_model_type,
+            'TrainingDataSourceId': training_data_source_id,
+        }
+        if ml_model_name is not None:
+            params['MLModelName'] = ml_model_name
+        if parameters is not None:
+            params['Parameters'] = parameters
+        if recipe is not None:
+            params['Recipe'] = recipe
+        if recipe_uri is not None:
+            params['RecipeUri'] = recipe_uri
+        return self.make_request(action='CreateMLModel',
+                                 body=json.dumps(params))
+
+    def create_realtime_endpoint(self, ml_model_id):
+        """
+        Creates a real-time endpoint for the `MLModel`. The endpoint
+        contains the URI of the `MLModel`; that is, the location to
+        send real-time prediction requests for the specified
+        `MLModel`.
+
+        :type ml_model_id: string
+        :param ml_model_id: The ID assigned to the `MLModel` during creation.
+
+        """
+        params = {'MLModelId': ml_model_id, }
+        return self.make_request(action='CreateRealtimeEndpoint',
+                                 body=json.dumps(params))
+
+    def delete_batch_prediction(self, batch_prediction_id):
+        """
+        Assigns the DELETED status to a `BatchPrediction`, rendering
+        it unusable.
+
+        After using the `DeleteBatchPrediction` operation, you can use
+        the GetBatchPrediction operation to verify that the status of
+        the `BatchPrediction` changed to DELETED.
+
+        The result of the `DeleteBatchPrediction` operation is
+        irreversible.
+
+        :type batch_prediction_id: string
+        :param batch_prediction_id: A user-supplied ID that uniquely identifies
+            the `BatchPrediction`.
+
+        """
+        params = {'BatchPredictionId': batch_prediction_id, }
+        return self.make_request(action='DeleteBatchPrediction',
+                                 body=json.dumps(params))
+
+    def delete_data_source(self, data_source_id):
+        """
+        Assigns the DELETED status to a `DataSource`, rendering it
+        unusable.
+
+        After using the `DeleteDataSource` operation, you can use the
+        GetDataSource operation to verify that the status of the
+        `DataSource` changed to DELETED.
+
+        The results of the `DeleteDataSource` operation are
+        irreversible.
+
+        :type data_source_id: string
+        :param data_source_id: A user-supplied ID that uniquely identifies the
+            `DataSource`.
+
+        """
+        params = {'DataSourceId': data_source_id, }
+        return self.make_request(action='DeleteDataSource',
+                                 body=json.dumps(params))
+
+    def delete_evaluation(self, evaluation_id):
+        """
+        Assigns the `DELETED` status to an `Evaluation`, rendering it
+        unusable.
+
+        After invoking the `DeleteEvaluation` operation, you can use
+        the GetEvaluation operation to verify that the status of the
+        `Evaluation` changed to `DELETED`.
+
+        The results of the `DeleteEvaluation` operation are
+        irreversible.
+
+        :type evaluation_id: string
+        :param evaluation_id: A user-supplied ID that uniquely identifies the
+            `Evaluation` to delete.
+
+        """
+        params = {'EvaluationId': evaluation_id, }
+        return self.make_request(action='DeleteEvaluation',
+                                 body=json.dumps(params))
+
+    def delete_ml_model(self, ml_model_id):
+        """
+        Assigns the DELETED status to an `MLModel`, rendering it
+        unusable.
+
+        After using the `DeleteMLModel` operation, you can use the
+        GetMLModel operation to verify that the status of the
+        `MLModel` changed to DELETED.
+
+        The result of the `DeleteMLModel` operation is irreversible.
+
+        :type ml_model_id: string
+        :param ml_model_id: A user-supplied ID that uniquely identifies the
+            `MLModel`.
+
+        """
+        params = {'MLModelId': ml_model_id, }
+        return self.make_request(action='DeleteMLModel',
+                                 body=json.dumps(params))
+
+    def delete_realtime_endpoint(self, ml_model_id):
+        """
+        Deletes a real-time endpoint of an `MLModel`.
+
+        :type ml_model_id: string
+        :param ml_model_id: The ID assigned to the `MLModel` during creation.
+
+        """
+        params = {'MLModelId': ml_model_id, }
+        return self.make_request(action='DeleteRealtimeEndpoint',
+                                 body=json.dumps(params))
+
+    def describe_batch_predictions(self, filter_variable=None, eq=None,
+                                   gt=None, lt=None, ge=None, le=None,
+                                   ne=None, prefix=None, sort_order=None,
+                                   next_token=None, limit=None):
+        """
+        Returns a list of `BatchPrediction` operations that match the
+        search criteria in the request.
+
+        :type filter_variable: string
+        :param filter_variable:
+        Use one of the following variables to filter a list of
+            `BatchPrediction`:
+
+
+        + `CreatedAt` - Sets the search criteria to the `BatchPrediction`
+              creation date.
+        + `Status` - Sets the search criteria to the `BatchPrediction` status.
+        + `Name` - Sets the search criteria to the contents of the
+              `BatchPrediction` ** ** `Name`.
+        + `IAMUser` - Sets the search criteria to the user account that invoked
+              the `BatchPrediction` creation.
+        + `MLModelId` - Sets the search criteria to the `MLModel` used in the
+              `BatchPrediction`.
+        + `DataSourceId` - Sets the search criteria to the `DataSource` used in
+              the `BatchPrediction`.
+        + `DataURI` - Sets the search criteria to the data file(s) used in the
+              `BatchPrediction`. The URL can identify either a file or an Amazon
+              Simple Storage Service (Amazon S3) bucket or directory.
+
+        :type eq: string
+        :param eq: The equal to operator. The `BatchPrediction` results will
+            have `FilterVariable` values that exactly match the value specified
+            with `EQ`.
+
+        :type gt: string
+        :param gt: The greater than operator. The `BatchPrediction` results
+            will have `FilterVariable` values that are greater than the value
+            specified with `GT`.
+
+        :type lt: string
+        :param lt: The less than operator.
The `BatchPrediction` results will + have `FilterVariable` values that are less than the value specified + with `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `BatchPrediction` + results will have `FilterVariable` values that are less than or + equal to the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `BatchPrediction` results + will have `FilterVariable` values not equal to the value specified + with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, a `Batch Prediction` operation could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this + `BatchPrediction`, select `Name` for the `FilterVariable` and any + of the following strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`s. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: An ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeBatchPredictions', + body=json.dumps(params)) + + def describe_data_sources(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, + prefix=None, sort_order=None, next_token=None, + limit=None): + """ + Returns a list of `DataSource` that match the search criteria + in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `DataSource`: + + + + `CreatedAt` - Sets the search criteria to `DataSource` creation + dates. + + `Status` - Sets the search criteria to `DataSource` statuses. + + `Name` - Sets the search criteria to the contents of `DataSource` ** + ** `Name`. + + `DataUri` - Sets the search criteria to the URI of data files used to + create the `DataSource`. The URI can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `DataSource` creation. + + :type eq: string + :param eq: The equal to operator. The `DataSource` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. 
The `DataSource` results will + have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `DataSource` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `DataSource` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `DataSource` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `DataSource` results will + have `FilterVariable` values not equal to the value specified with + `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, a `DataSource` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `DataSource`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The maximum number of `DataSource` to include in the + result. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeDataSources', + body=json.dumps(params)) + + def describe_evaluations(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, prefix=None, + sort_order=None, next_token=None, limit=None): + """ + Returns a list of `DescribeEvaluations` that match the search + criteria in the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variable to filter a list of `Evaluation` + objects: + + + + `CreatedAt` - Sets the search criteria to the `Evaluation` creation + date. + + `Status` - Sets the search criteria to the `Evaluation` status. + + `Name` - Sets the search criteria to the contents of `Evaluation` ** + ** `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + an `Evaluation`. + + `MLModelId` - Sets the search criteria to the `MLModel` that was + evaluated. + + `DataSourceId` - Sets the search criteria to the `DataSource` used in + `Evaluation`. + + `DataUri` - Sets the search criteria to the data file(s) used in + `Evaluation`. The URL can identify either a file or an Amazon + Simple Storage Solution (Amazon S3) bucket or directory. 
+ + :type eq: string + :param eq: The equal to operator. The `Evaluation` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `Evaluation` results will + have `FilterVariable` values that are greater than the value + specified with `GT`. + + :type lt: string + :param lt: The less than operator. The `Evaluation` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `Evaluation` + results will have `FilterVariable` values that are greater than or + equal to the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `Evaluation` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `Evaluation` results will + have `FilterVariable` values not equal to the value specified with + `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `Evaluation` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `Evaluation`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The maximum number of `Evaluation` to include in the + result. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeEvaluations', + body=json.dumps(params)) + + def describe_ml_models(self, filter_variable=None, eq=None, gt=None, + lt=None, ge=None, le=None, ne=None, prefix=None, + sort_order=None, next_token=None, limit=None): + """ + Returns a list of `MLModel` that match the search criteria in + the request. + + :type filter_variable: string + :param filter_variable: + Use one of the following variables to filter a list of `MLModel`: + + + + `CreatedAt` - Sets the search criteria to `MLModel` creation date. + + `Status` - Sets the search criteria to `MLModel` status. + + `Name` - Sets the search criteria to the contents of `MLModel` ** ** + `Name`. + + `IAMUser` - Sets the search criteria to the user account that invoked + the `MLModel` creation. + + `TrainingDataSourceId` - Sets the search criteria to the `DataSource` + used to train one or more `MLModel`. 
+ + `RealtimeEndpointStatus` - Sets the search criteria to the `MLModel` + real-time endpoint status. + + `MLModelType` - Sets the search criteria to `MLModel` type: binary, + regression, or multi-class. + + `Algorithm` - Sets the search criteria to the algorithm that the + `MLModel` uses. + + `TrainingDataURI` - Sets the search criteria to the data file(s) used + in training a `MLModel`. The URL can identify either a file or an + Amazon Simple Storage Service (Amazon S3) bucket or directory. + + :type eq: string + :param eq: The equal to operator. The `MLModel` results will have + `FilterVariable` values that exactly match the value specified with + `EQ`. + + :type gt: string + :param gt: The greater than operator. The `MLModel` results will have + `FilterVariable` values that are greater than the value specified + with `GT`. + + :type lt: string + :param lt: The less than operator. The `MLModel` results will have + `FilterVariable` values that are less than the value specified with + `LT`. + + :type ge: string + :param ge: The greater than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are greater than or equal to + the value specified with `GE`. + + :type le: string + :param le: The less than or equal to operator. The `MLModel` results + will have `FilterVariable` values that are less than or equal to + the value specified with `LE`. + + :type ne: string + :param ne: The not equal to operator. The `MLModel` results will have + `FilterVariable` values not equal to the value specified with `NE`. + + :type prefix: string + :param prefix: + A string that is found at the beginning of a variable, such as `Name` + or `Id`. + + For example, an `MLModel` could have the `Name` + `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`, + select `Name` for the `FilterVariable` and any of the following + strings for the `Prefix`: + + + + 2014-09 + + 2014-09-09 + + 2014-09-09-Holiday + + :type sort_order: string + :param sort_order: A two-value parameter that determines the sequence + of the resulting list of `MLModel`. + + + `asc` - Arranges the list in ascending order (A-Z, 0-9). + + `dsc` - Arranges the list in descending order (Z-A, 9-0). + + + Results are sorted by `FilterVariable`. + + :type next_token: string + :param next_token: The ID of the page in the paginated results. + + :type limit: integer + :param limit: The number of pages of information to include in the + result. The range of acceptable values is 1 through 100. The + default value is 100. + + """ + params = {} + if filter_variable is not None: + params['FilterVariable'] = filter_variable + if eq is not None: + params['EQ'] = eq + if gt is not None: + params['GT'] = gt + if lt is not None: + params['LT'] = lt + if ge is not None: + params['GE'] = ge + if le is not None: + params['LE'] = le + if ne is not None: + params['NE'] = ne + if prefix is not None: + params['Prefix'] = prefix + if sort_order is not None: + params['SortOrder'] = sort_order + if next_token is not None: + params['NextToken'] = next_token + if limit is not None: + params['Limit'] = limit + return self.make_request(action='DescribeMLModels', + body=json.dumps(params)) + + def get_batch_prediction(self, batch_prediction_id): + """ + Returns a `BatchPrediction` that includes detailed metadata, + status, and data file information for a `Batch Prediction` + request. + + :type batch_prediction_id: string + :param batch_prediction_id: An ID assigned to the `BatchPrediction` at + creation. 
+ + """ + params = {'BatchPredictionId': batch_prediction_id, } + return self.make_request(action='GetBatchPrediction', + body=json.dumps(params)) + + def get_data_source(self, data_source_id, verbose=None): + """ + Returns a `DataSource` that includes metadata and data file + information, as well as the current status of the + `DataSource`. + + `GetDataSource` provides results in normal or verbose format. + The verbose format adds the schema description and the list of + files pointed to by the DataSource to the normal format. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetDataSource` operation should + return `DataSourceSchema`. + If true, `DataSourceSchema` is returned. + + If false, `DataSourceSchema` is not returned. + + """ + params = {'DataSourceId': data_source_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetDataSource', + body=json.dumps(params)) + + def get_evaluation(self, evaluation_id): + """ + Returns an `Evaluation` that includes metadata as well as the + current status of the `Evaluation`. + + :type evaluation_id: string + :param evaluation_id: The ID of the `Evaluation` to retrieve. The + evaluation of each `MLModel` is recorded and cataloged. The ID + provides the means to access the information. + + """ + params = {'EvaluationId': evaluation_id, } + return self.make_request(action='GetEvaluation', + body=json.dumps(params)) + + def get_ml_model(self, ml_model_id, verbose=None): + """ + Returns an `MLModel` that includes detailed metadata, and data + source information as well as the current status of the + `MLModel`. + + `GetMLModel` provides results in normal or verbose format. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` at creation. + + :type verbose: boolean + :param verbose: Specifies whether the `GetMLModel` operation should + return `Recipe`. + If true, `Recipe` is returned. + + If false, `Recipe` is not returned. + + """ + params = {'MLModelId': ml_model_id, } + if verbose is not None: + params['Verbose'] = verbose + return self.make_request(action='GetMLModel', + body=json.dumps(params)) + + def predict(self, ml_model_id, record, predict_endpoint): + """ + Generates a prediction for the observation using the specified + `MLModel`. + + + Not all response parameters will be populated because this is + dependent on the type of requested model. + + :type ml_model_id: string + :param ml_model_id: A unique identifier of the `MLModel`. + + :type record: map + :param record: A map of variable name-value pairs that represent an + observation. + + :type predict_endpoint: string + :param predict_endpoint: The endpoint to send the predict request to. + + """ + predict_host = urlsplit(predict_endpoint).hostname + if predict_host is None: + predict_host = predict_endpoint + + params = { + 'MLModelId': ml_model_id, + 'Record': record, + 'PredictEndpoint': predict_host, + } + return self.make_request(action='Predict', + body=json.dumps(params), + host=predict_host) + + def update_batch_prediction(self, batch_prediction_id, + batch_prediction_name): + """ + Updates the `BatchPredictionName` of a `BatchPrediction`. + + You can use the GetBatchPrediction operation to view the + contents of the updated data element. + + :type batch_prediction_id: string + :param batch_prediction_id: The ID assigned to the `BatchPrediction` + during creation. 
+ + :type batch_prediction_name: string + :param batch_prediction_name: A new user-supplied name or description + of the `BatchPrediction`. + + """ + params = { + 'BatchPredictionId': batch_prediction_id, + 'BatchPredictionName': batch_prediction_name, + } + return self.make_request(action='UpdateBatchPrediction', + body=json.dumps(params)) + + def update_data_source(self, data_source_id, data_source_name): + """ + Updates the `DataSourceName` of a `DataSource`. + + You can use the GetDataSource operation to view the contents + of the updated data element. + + :type data_source_id: string + :param data_source_id: The ID assigned to the `DataSource` during + creation. + + :type data_source_name: string + :param data_source_name: A new user-supplied name or description of the + `DataSource` that will replace the current description. + + """ + params = { + 'DataSourceId': data_source_id, + 'DataSourceName': data_source_name, + } + return self.make_request(action='UpdateDataSource', + body=json.dumps(params)) + + def update_evaluation(self, evaluation_id, evaluation_name): + """ + Updates the `EvaluationName` of an `Evaluation`. + + You can use the GetEvaluation operation to view the contents + of the updated data element. + + :type evaluation_id: string + :param evaluation_id: The ID assigned to the `Evaluation` during + creation. + + :type evaluation_name: string + :param evaluation_name: A new user-supplied name or description of the + `Evaluation` that will replace the current content. + + """ + params = { + 'EvaluationId': evaluation_id, + 'EvaluationName': evaluation_name, + } + return self.make_request(action='UpdateEvaluation', + body=json.dumps(params)) + + def update_ml_model(self, ml_model_id, ml_model_name=None, + score_threshold=None): + """ + Updates the `MLModelName` and the `ScoreThreshold` of an + `MLModel`. + + You can use the GetMLModel operation to view the contents of + the updated data element. + + :type ml_model_id: string + :param ml_model_id: The ID assigned to the `MLModel` during creation. + + :type ml_model_name: string + :param ml_model_name: A user-supplied name or description of the + `MLModel`. + + :type score_threshold: float + :param score_threshold: The `ScoreThreshold` used in binary + classification `MLModel` that marks the boundary between a positive + prediction and a negative prediction. + Output values greater than or equal to the `ScoreThreshold` receive a + positive result from the `MLModel`, such as `True`. Output values + less than the `ScoreThreshold` receive a negative response from the + `MLModel`, such as `False`. 
+ + """ + params = {'MLModelId': ml_model_id, } + if ml_model_name is not None: + params['MLModelName'] = ml_model_name + if score_threshold is not None: + params['ScoreThreshold'] = score_threshold + return self.make_request(action='UpdateMLModel', + body=json.dumps(params)) + + def make_request(self, action, body, host=None): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request_kwargs = { + 'method':'POST', 'path':'/', 'auth_path':'/', 'params':{}, + 'headers': headers, 'data':body + } + if host is not None: + headers['Host'] = host + http_request_kwargs['host'] = host + http_request = self.build_base_http_request(**http_request_kwargs) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/manage/__init__.py b/ext/boto/manage/__init__.py new file mode 100644 index 0000000000..49d029ba2c --- /dev/null +++ b/ext/boto/manage/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/ext/boto/manage/cmdshell.py b/ext/boto/manage/cmdshell.py new file mode 100644 index 0000000000..f53227763a --- /dev/null +++ b/ext/boto/manage/cmdshell.py @@ -0,0 +1,407 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +The cmdshell module uses the paramiko package to create SSH connections +to the servers that are represented by instance objects. The module has +functions for running commands, managing files, and opening interactive +shell sessions over those connections. +""" +from boto.mashups.interactive import interactive_shell +import boto +import os +import time +import shutil +import paramiko +import socket +import subprocess + +from boto.compat import StringIO + +class SSHClient(object): + """ + This class creates a paramiko.SSHClient() object that represents + a session with an SSH server. You can use the SSHClient object to send + commands to the remote host and manipulate files on the remote host. + + :ivar server: A Server object or FakeServer object. + :ivar host_key_file: The path to the user's .ssh key files. + :ivar uname: The username for the SSH connection. Default = 'root'. + :ivar timeout: The optional timeout variable for the TCP connection. + :ivar ssh_pwd: An optional password to use for authentication or for + unlocking the private key. + """ + def __init__(self, server, + host_key_file='~/.ssh/known_hosts', + uname='root', timeout=None, ssh_pwd=None): + self.server = server + self.host_key_file = host_key_file + self.uname = uname + self._timeout = timeout + self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file, + password=ssh_pwd) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.connect() + + def connect(self, num_retries=5): + """ + Connect to an SSH server and authenticate with it. + + :type num_retries: int + :param num_retries: The maximum number of connection attempts. + """ + retry = 0 + while retry < num_retries: + try: + self._ssh_client.connect(self.server.hostname, + username=self.uname, + pkey=self._pkey, + timeout=self._timeout) + return + except socket.error as xxx_todo_changeme: + (value, message) = xxx_todo_changeme.args + if value in (51, 61, 111): + print('SSH Connection refused, will retry in 5 seconds') + time.sleep(5) + retry += 1 + else: + raise + except paramiko.BadHostKeyException: + print("%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname) + print('Edit that file to remove the entry and then hit return to try again') + raw_input('Hit Enter when ready') + retry += 1 + except EOFError: + print('Unexpected Error from SSH Connection, retry in 5 seconds') + time.sleep(5) + retry += 1 + print('Could not establish SSH connection') + + def open_sftp(self): + """ + Open an SFTP session on the SSH server. + + :rtype: :class:`paramiko.sftp_client.SFTPClient` + :return: An SFTP client object. + """ + return self._ssh_client.open_sftp() + + def get_file(self, src, dst): + """ + Open an SFTP session on the remote host, and copy a file from + the remote host to the specified path on the local host. 
+ + :type src: string + :param src: The path to the target file on the remote host. + + :type dst: string + :param dst: The path on your local host where you want to + store the file. + """ + sftp_client = self.open_sftp() + sftp_client.get(src, dst) + + def put_file(self, src, dst): + """ + Open an SFTP session on the remote host, and copy a file from + the local host to the specified path on the remote host. + + :type src: string + :param src: The path to the target file on your local host. + + :type dst: string + :param dst: The path on the remote host where you want to store + the file. + """ + sftp_client = self.open_sftp() + sftp_client.put(src, dst) + + def open(self, filename, mode='r', bufsize=-1): + """ + Open an SFTP session to the remote host, and open a file on + that host. + + :type filename: string + :param filename: The path to the file on the remote host. + + :type mode: string + :param mode: The file interaction mode. + + :type bufsize: integer + :param bufsize: The file buffer size. + + :rtype: :class:`paramiko.sftp_file.SFTPFile` + :return: A paramiko proxy object for a file on the remote server. + """ + sftp_client = self.open_sftp() + return sftp_client.open(filename, mode, bufsize) + + def listdir(self, path): + """ + List all of the files and subdirectories at the specified path + on the remote host. + + :type path: string + :param path: The base path from which to obtain the list. + + :rtype: list + :return: A list of files and subdirectories at the specified path. + """ + sftp_client = self.open_sftp() + return sftp_client.listdir(path) + + def isdir(self, path): + """ + Check the specified path on the remote host to determine if + it is a directory. + + :type path: string + :param path: The path to the directory that you want to check. + + :rtype: integer + :return: If the path is a directory, the function returns 1. + If the path is a file or an invalid path, the function + returns 0. + """ + status = self.run('[ -d %s ] || echo "FALSE"' % path) + if status[1].startswith('FALSE'): + return 0 + return 1 + + def exists(self, path): + """ + Check the remote host for the specified path, or a file + at the specified path. This function returns 1 if the + path or the file exist on the remote host, and returns 0 if + the path or the file does not exist on the remote host. + + :type path: string + :param path: The path to the directory or file that you want to check. + + :rtype: integer + :return: If the path or the file exist, the function returns 1. + If the path or the file do not exist on the remote host, + the function returns 0. + """ + + status = self.run('[ -a %s ] || echo "FALSE"' % path) + if status[1].startswith('FALSE'): + return 0 + return 1 + + def shell(self): + """ + Start an interactive shell session with the remote host. + """ + channel = self._ssh_client.invoke_shell() + interactive_shell(channel) + + def run(self, command): + """ + Run a command on the remote host. + + :type command: string + :param command: The command that you want to send to the remote host. + + :rtype: tuple + :return: This function returns a tuple that contains an integer status, + the stdout from the command, and the stderr from the command. 
+ + """ + boto.log.debug('running:%s on %s' % (command, self.server.instance_id)) + status = 0 + try: + t = self._ssh_client.exec_command(command) + except paramiko.SSHException: + status = 1 + std_out = t[1].read() + std_err = t[2].read() + t[0].close() + t[1].close() + t[2].close() + boto.log.debug('stdout: %s' % std_out) + boto.log.debug('stderr: %s' % std_err) + return (status, std_out, std_err) + + def run_pty(self, command): + """ + Request a pseudo-terminal from a server, and execute a command on that + server. + + :type command: string + :param command: The command that you want to run on the remote host. + + :rtype: :class:`paramiko.channel.Channel` + :return: An open channel object. + """ + boto.log.debug('running:%s on %s' % (command, self.server.instance_id)) + channel = self._ssh_client.get_transport().open_session() + channel.get_pty() + channel.exec_command(command) + return channel + + def close(self): + """ + Close an SSH session and any open channels that are tied to it. + """ + transport = self._ssh_client.get_transport() + transport.close() + self.server.reset_cmdshell() + +class LocalClient(object): + """ + :ivar server: A Server object or FakeServer object. + :ivar host_key_file: The path to the user's .ssh key files. + :ivar uname: The username for the SSH connection. Default = 'root'. + """ + def __init__(self, server, host_key_file=None, uname='root'): + self.server = server + self.host_key_file = host_key_file + self.uname = uname + + def get_file(self, src, dst): + """ + Copy a file from one directory to another. + """ + shutil.copyfile(src, dst) + + def put_file(self, src, dst): + """ + Copy a file from one directory to another. + """ + shutil.copyfile(src, dst) + + def listdir(self, path): + """ + List all of the files and subdirectories at the specified path. + + :rtype: list + :return: Return a list containing the names of the entries + in the directory given by path. + """ + return os.listdir(path) + + def isdir(self, path): + """ + Check the specified path to determine if it is a directory. + + :rtype: boolean + :return: Returns True if the path is an existing directory. + """ + return os.path.isdir(path) + + def exists(self, path): + """ + Check for the specified path, or check a file at the specified path. + + :rtype: boolean + :return: If the path or the file exist, the function returns True. + """ + return os.path.exists(path) + + def shell(self): + raise NotImplementedError('shell not supported with LocalClient') + + def run(self): + """ + Open a subprocess and run a command on the local host. + + :rtype: tuple + :return: This function returns a tuple that contains an integer status + and a string with the combined stdout and stderr output. + """ + boto.log.info('running:%s' % self.command) + log_fp = StringIO() + process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + while process.poll() is None: + time.sleep(1) + t = process.communicate() + log_fp.write(t[0]) + log_fp.write(t[1]) + boto.log.info(log_fp.getvalue()) + boto.log.info('output: %s' % log_fp.getvalue()) + return (process.returncode, log_fp.getvalue()) + + def close(self): + pass + +class FakeServer(object): + """ + This object has a subset of the variables that are normally in a + :class:`boto.manage.server.Server` object. You can use this FakeServer + object to create a :class:`boto.manage.SSHClient` object if you + don't have a real Server object. + + :ivar instance: A boto Instance object. 
+ :ivar ssh_key_file: The path to the SSH key file. + """ + def __init__(self, instance, ssh_key_file): + self.instance = instance + self.ssh_key_file = ssh_key_file + self.hostname = instance.dns_name + self.instance_id = self.instance.id + +def start(server): + """ + Connect to the specified server. + + :return: If the server is local, the function returns a + :class:`boto.manage.cmdshell.LocalClient` object. + If the server is remote, the function returns a + :class:`boto.manage.cmdshell.SSHClient` object. + """ + instance_id = boto.config.get('Instance', 'instance-id', None) + if instance_id == server.instance_id: + return LocalClient(server) + else: + return SSHClient(server) + +def sshclient_from_instance(instance, ssh_key_file, + host_key_file='~/.ssh/known_hosts', + user_name='root', ssh_pwd=None): + """ + Create and return an SSHClient object given an + instance object. + + :type instance: :class`boto.ec2.instance.Instance` object + :param instance: The instance object. + + :type ssh_key_file: string + :param ssh_key_file: A path to the private key file that is + used to log into the instance. + + :type host_key_file: string + :param host_key_file: A path to the known_hosts file used + by the SSH client. + Defaults to ~/.ssh/known_hosts + :type user_name: string + :param user_name: The username to use when logging into + the instance. Defaults to root. + + :type ssh_pwd: string + :param ssh_pwd: The passphrase, if any, associated with + private key. + """ + s = FakeServer(instance, ssh_key_file) + return SSHClient(s, host_key_file, user_name, ssh_pwd) diff --git a/ext/boto/manage/propget.py b/ext/boto/manage/propget.py new file mode 100644 index 0000000000..d034127d8b --- /dev/null +++ b/ext/boto/manage/propget.py @@ -0,0 +1,63 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
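The `sshclient_from_instance` helper defined above is the usual entry point into cmdshell: it wraps an EC2 instance object in a `FakeServer` and hands back a connected `SSHClient`. A minimal sketch of driving it end to end (the region, instance ID, key path, and `ec2-user` username are placeholders; `get_only_instances` is the standard boto EC2 lookup, not something added by this patch):

    import boto.ec2
    from boto.manage.cmdshell import sshclient_from_instance

    ec2 = boto.ec2.connect_to_region('us-east-1')
    # Look up a running instance; the ID is a placeholder.
    instance = ec2.get_only_instances(instance_ids=['i-12345678'])[0]
    ssh = sshclient_from_instance(instance, '/path/to/mykey.pem',
                                  user_name='ec2-user')
    # run() returns (status, stdout, stderr), per the method above.
    status, stdout, stderr = ssh.run('uname -a')
    print(status, stdout)
    ssh.close()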
+
+def get(prop, choices=None):
+    prompt = prop.verbose_name
+    if not prompt:
+        prompt = prop.name
+    if choices:
+        if callable(choices):
+            choices = choices()
+    else:
+        choices = prop.get_choices()
+    valid = False
+    while not valid:
+        if choices:
+            min = 1
+            max = len(choices)
+            for i in range(min, max+1):
+                value = choices[i-1]
+                if isinstance(value, tuple):
+                    value = value[0]
+                print('[%d] %s' % (i, value))
+            value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
+            try:
+                int_value = int(value)
+                value = choices[int_value-1]
+                if isinstance(value, tuple):
+                    value = value[1]
+                valid = True
+            except ValueError:
+                print('%s is not a valid choice' % value)
+            except IndexError:
+                print('%s is not within the range [%d-%d]' % (value, min, max))
+        else:
+            value = raw_input('%s: ' % prompt)
+            try:
+                value = prop.validate(value)
+                if prop.empty(value) and prop.required:
+                    print('A value is required')
+                else:
+                    valid = True
+            except Exception:
+                print('Invalid value: %s' % value)
+    return value
+
diff --git a/ext/boto/manage/server.py b/ext/boto/manage/server.py
new file mode 100644
index 0000000000..d9224ab8a8
--- /dev/null
+++ b/ext/boto/manage/server.py
@@ -0,0 +1,556 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
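The `propget.get` helper just defined drives all of the interactive prompts in this package: given a property object, optionally with choices, it loops on stdin until it reads a valid value. A compact sketch, mirroring the `instance_type` prompt that `CommandLineGetter` builds later in this file (the choices list here is illustrative; note the module is Python 2 era, so input goes through `raw_input`):

    from boto.sdb.db.property import StringProperty
    from boto.manage import propget

    prop = StringProperty(name='instance_type', verbose_name='Instance Type',
                          choices=['m1.small', 'm1.large', 'm1.xlarge'])
    # Prints a numbered menu, reads a 1-based index from stdin and
    # returns the selected string.
    value = propget.get(prop)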
+""" +High-level abstraction of an EC2 server +""" + +import boto.ec2 +from boto.mashups.iobject import IObject +from boto.pyami.config import BotoConfigPath, Config +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty +from boto.manage import propget +from boto.ec2.zone import Zone +from boto.ec2.keypair import KeyPair +import os, time +from contextlib import closing +from boto.exception import EC2ResponseError +from boto.compat import six, StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', + 'c1.medium', 'c1.xlarge', + 'm2.2xlarge', 'm2.4xlarge'] + +class Bundler(object): + + def __init__(self, server, uname='root'): + from boto.manage.cmdshell import SSHClient + self.server = server + self.uname = uname + self.ssh_client = SSHClient(server, uname=uname) + + def copy_x509(self, key_file, cert_file): + print('\tcopying cert and pk over to /mnt directory on server') + self.ssh_client.open_sftp() + path, name = os.path.split(key_file) + self.remote_key_file = '/mnt/%s' % name + self.ssh_client.put_file(key_file, self.remote_key_file) + path, name = os.path.split(cert_file) + self.remote_cert_file = '/mnt/%s' % name + self.ssh_client.put_file(cert_file, self.remote_cert_file) + print('...complete!') + + def bundle_image(self, prefix, size, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-bundle-vol ' + command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file) + command += '-u %s ' % self.server._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + return command + + def upload_bundle(self, bucket, prefix, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.server.ec2.aws_access_key_id + command += '-s %s ' % self.server.ec2.aws_secret_access_key + return command + + def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None, + size=None, ssh_key=None, fp=None, clear_history=True): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + if not ssh_key: + ssh_key = self.server.get_ssh_key_file() + self.copy_x509(key_file, cert_file) + if not fp: + fp = StringIO() + fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath) + fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ') + if clear_history: + fp.write('history -c; ') + fp.write(self.bundle_image(prefix, size, ssh_key)) + fp.write('; ') + fp.write(self.upload_bundle(bucket, prefix, ssh_key)) + fp.write('; ') + fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath) + fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys') + command = fp.getvalue() + print('running the following command on the remote server:') + print(command) + t = self.ssh_client.run(command) + print('\t%s' % t[0]) + print('\t%s' % t[1]) + print('...complete!') + print('registering image...') 
+ self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + +class CommandLineGetter(object): + + def get_ami_list(self): + my_amis = [] + for ami in self.ec2.get_all_images(): + # hack alert, need a better way to do this! + if ami.location.find('pyami') >= 0: + my_amis.append((ami.location, ami)) + return my_amis + + def get_region(self, params): + region = params.get('region', None) + if isinstance(region, basestring): + region = boto.ec2.get_region(region) + params['region'] = region + if not region: + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + self.ec2 = params['region'].connect() + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_description(self, params): + if not params.get('description', None): + prop = self.cls.find_property('description') + params['description'] = propget.get(prop) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_ami_id(self, params): + valid = False + while not valid: + ami = params.get('ami', None) + if not ami: + prop = StringProperty(name='ami', verbose_name='AMI') + ami = propget.get(prop) + try: + rs = self.ec2.get_all_images([ami]) + if len(rs) == 1: + valid = True + params['ami'] = rs[0] + except EC2ResponseError: + pass + + def get_group(self, params): + group = params.get('group', None) + if isinstance(group, basestring): + group_list = self.ec2.get_all_security_groups() + for g in group_list: + if g.name == group: + group = g + params['group'] = g + if not group: + prop = StringProperty(name='group', verbose_name='EC2 Security Group', + choices=self.ec2.get_all_security_groups) + params['group'] = propget.get(prop) + + def get_key(self, params): + keypair = params.get('keypair', None) + if isinstance(keypair, basestring): + key_list = self.ec2.get_all_key_pairs() + for k in key_list: + if k.name == keypair: + keypair = k.name + params['keypair'] = k.name + if not keypair: + prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair', + choices=self.ec2.get_all_key_pairs) + params['keypair'] = propget.get(prop).name + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_name(params) + self.get_description(params) + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + self.get_ami_id(params) + self.get_group(params) + self.get_key(params) + +class Server(Model): + + # + # The properties of this object consists of real properties for data that + # is not already stored in EC2 somewhere (e.g. name, description) plus + # calculated properties for all of the properties that are already in + # EC2 (e.g. hostname, security groups, etc.) 
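+    # For example (an illustrative sketch, not code from this module):
+    #
+    #   s = next(Server.find(name='web1'))  # hypothetical lookup
+    #   s.hostname   # calls _hostname() and returns the live public DNS name
+    #   s.status     # calls _status(), which polls EC2 for the current state
+    #
+    # i.e. each CalculatedProperty below routes attribute access to the
+    # corresponding _<name>() method instead of reading a stored value.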
+    #
+    name = StringProperty(unique=True, verbose_name="Name")
+    description = StringProperty(verbose_name="Description")
+    region_name = StringProperty(verbose_name="EC2 Region Name")
+    instance_id = StringProperty(verbose_name="EC2 Instance ID")
+    elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address")
+    production = BooleanProperty(verbose_name="Is This Server Production", default=False)
+    ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True)
+    zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True)
+    hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True)
+    private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True)
+    groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True)
+    security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True)
+    key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True)
+    instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True)
+    status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True)
+    launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True)
+    console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=open, use_method=True)
+
+    packages = []
+    plugins = []
+
+    @classmethod
+    def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key):
+        if not cfg.has_section('Credentials'):
+            cfg.add_section('Credentials')
+        cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id)
+        cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key)
+        if not cfg.has_section('DB_Server'):
+            cfg.add_section('DB_Server')
+        cfg.set('DB_Server', 'db_type', 'SimpleDB')
+        cfg.set('DB_Server', 'db_name', cls._manager.domain.name)
+
+    @classmethod
+    def create(cls, config_file=None, logical_volume=None, cfg=None, **params):
+        """
+        Create a new instance based on the specified configuration file, or on
+        the specified configuration plus the parameters passed in.
+
+        If the config_file argument is not None, the configuration is read from there.
+        Otherwise, the cfg argument is used.
+
+        The config file may include other config files with a #import reference. The included
+        config files must reside in the same directory as the specified file.
+
+        The logical_volume argument, if supplied, will be used to get the current physical
+        volume ID and use that as an override of the value specified in the config file. This
+        may be useful for debugging purposes when you want to debug with a production config
+        file but a test Volume.
+
+        Any additional keyword arguments (params) may be used to override EC2
+        configuration values in the config file.
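+
+        Example (a sketch; the file name and quantity are hypothetical)::
+
+            servers = Server.create(config_file='production.cfg', quantity=2)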
+ """ + if config_file: + cfg = Config(path=config_file) + if cfg.has_section('EC2'): + # include any EC2 configuration values that aren't specified in params: + for option in cfg.options('EC2'): + if option not in params: + params[option] = cfg.get('EC2', option) + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key) + ami = params.get('ami') + kp = params.get('keypair') + group = params.get('group') + zone = params.get('zone') + # deal with possibly passed in logical volume: + if logical_volume != None: + cfg.set('EBS', 'logical_volume_name', logical_volume.name) + cfg_fp = StringIO() + cfg.write(cfg_fp) + # deal with the possibility that zone and/or keypair are strings read from the config file: + if isinstance(zone, Zone): + zone = zone.name + if isinstance(kp, KeyPair): + kp = kp.name + reservation = ami.run(min_count=1, + max_count=params.get('quantity', 1), + key_name=kp, + security_groups=[group], + instance_type=params.get('instance_type'), + placement = zone, + user_data = cfg_fp.getvalue()) + l = [] + i = 0 + elastic_ip = params.get('elastic_ip') + instances = reservation.instances + if elastic_ip is not None and instances.__len__() > 0: + instance = instances[0] + print('Waiting for instance to start so we can set its elastic IP address...') + # Sometimes we get a message from ec2 that says that the instance does not exist. + # Hopefully the following delay will giv eec2 enough time to get to a stable state: + time.sleep(5) + while instance.update() != 'running': + time.sleep(1) + instance.use_ip(elastic_ip) + print('set the elastic IP of the first instance to %s' % elastic_ip) + for instance in instances: + s = cls() + s.ec2 = ec2 + s.name = params.get('name') + '' if i==0 else str(i) + s.description = params.get('description') + s.region_name = region.name + s.instance_id = instance.id + if elastic_ip and i == 0: + s.elastic_ip = elastic_ip + s.put() + l.append(s) + i += 1 + return l + + @classmethod + def create_from_instance_id(cls, instance_id, name, description=''): + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + try: + rs = ec2.get_all_reservations([instance_id]) + except: + rs = [] + if len(rs) == 1: + s = cls() + s.ec2 = ec2 + s.name = name + s.description = description + s.region_name = region.name + s.instance_id = instance_id + s._reservation = rs[0] + for instance in s._reservation.instances: + if instance.id == instance_id: + s._instance = instance + s.put() + return s + return None + + @classmethod + def create_from_current_instances(cls): + servers = [] + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + rs = ec2.get_all_reservations() + for reservation in rs: + for instance in reservation.instances: + try: + next(Server.find(instance_id=instance.id)) + boto.log.info('Server for %s already exists' % instance.id) + except StopIteration: + s = cls() + s.ec2 = ec2 + s.name = instance.id + s.region_name = region.name + s.instance_id = instance.id + s._reservation = reservation + s.put() + servers.append(s) + return servers + + def __init__(self, id=None, **kw): + super(Server, self).__init__(id, **kw) + self.ssh_key_file = None + self.ec2 = None + self._cmdshell = None + self._reservation = None + self._instance = None + self._setup_ec2() + + def _setup_ec2(self): + if self.ec2 and self._instance and self._reservation: + return + if self.id: + if self.region_name: + 
for region in boto.ec2.regions(): + if region.name == self.region_name: + self.ec2 = region.connect() + if self.instance_id and not self._instance: + try: + rs = self.ec2.get_all_reservations([self.instance_id]) + if len(rs) >= 1: + for instance in rs[0].instances: + if instance.id == self.instance_id: + self._reservation = rs[0] + self._instance = instance + except EC2ResponseError: + pass + + def _status(self): + status = '' + if self._instance: + self._instance.update() + status = self._instance.state + return status + + def _hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.public_dns_name + return hostname + + def _private_hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.private_dns_name + return hostname + + def _instance_type(self): + it = '' + if self._instance: + it = self._instance.instance_type + return it + + def _launch_time(self): + lt = '' + if self._instance: + lt = self._instance.launch_time + return lt + + def _console_output(self): + co = '' + if self._instance: + co = self._instance.get_console_output() + return co + + def _groups(self): + gn = [] + if self._reservation: + gn = self._reservation.groups + return gn + + def _security_group(self): + groups = self._groups() + if len(groups) >= 1: + return groups[0].id + return "" + + def _zone(self): + zone = None + if self._instance: + zone = self._instance.placement + return zone + + def _key_name(self): + kn = None + if self._instance: + kn = self._instance.key_name + return kn + + def put(self): + super(Server, self).put() + self._setup_ec2() + + def delete(self): + if self.production: + raise ValueError("Can't delete a production server") + #self.stop() + super(Server, self).delete() + + def stop(self): + if self.production: + raise ValueError("Can't delete a production server") + if self._instance: + self._instance.stop() + + def terminate(self): + if self.production: + raise ValueError("Can't delete a production server") + if self._instance: + self._instance.terminate() + + def reboot(self): + if self._instance: + self._instance.reboot() + + def wait(self): + while self.status != 'running': + time.sleep(5) + + def get_ssh_key_file(self): + if not self.ssh_key_file: + ssh_dir = os.path.expanduser('~/.ssh') + if os.path.isdir(ssh_dir): + ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name) + if os.path.isfile(ssh_file): + self.ssh_key_file = ssh_file + if not self.ssh_key_file: + iobject = IObject() + self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file') + return self.ssh_key_file + + def get_cmdshell(self): + if not self._cmdshell: + from boto.manage import cmdshell + self.get_ssh_key_file() + self._cmdshell = cmdshell.start(self) + return self._cmdshell + + def reset_cmdshell(self): + self._cmdshell = None + + def run(self, command): + with closing(self.get_cmdshell()) as cmd: + status = cmd.run(command) + return status + + def get_bundler(self, uname='root'): + self.get_ssh_key_file() + return Bundler(self, uname) + + def get_ssh_client(self, uname='root', ssh_pwd=None): + from boto.manage.cmdshell import SSHClient + self.get_ssh_key_file() + return SSHClient(self, uname=uname, ssh_pwd=ssh_pwd) + + def install(self, pkg): + return self.run('apt-get -y install %s' % pkg) + + + diff --git a/ext/boto/manage/task.py b/ext/boto/manage/task.py new file mode 100644 index 0000000000..c6663b9f0e --- /dev/null +++ b/ext/boto/manage/task.py @@ -0,0 +1,176 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby 
granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty +from boto.sdb.db.model import Model +import datetime, subprocess, time +from boto.compat import StringIO + +def check_hour(val): + if val == '*': + return + if int(val) < 0 or int(val) > 23: + raise ValueError + +class Task(Model): + + """ + A scheduled, repeating task that can be executed by any participating servers. + The scheduling is similar to cron jobs. Each task has an hour attribute. + The allowable values for hour are [0-23|*]. + + To keep the operation reasonably efficient and not cause excessive polling, + the minimum granularity of a Task is hourly. Some examples: + + hour='*' - the task would be executed each hour + hour='3' - the task would be executed at 3AM GMT each day. + + """ + name = StringProperty() + hour = StringProperty(required=True, validator=check_hour, default='*') + command = StringProperty(required=True) + last_executed = DateTimeProperty() + last_status = IntegerProperty() + last_output = StringProperty() + message_id = StringProperty() + + @classmethod + def start_all(cls, queue_name): + for task in cls.all(): + task.start(queue_name) + + def __init__(self, id=None, **kw): + super(Task, self).__init__(id, **kw) + self.hourly = self.hour == '*' + self.daily = self.hour != '*' + self.now = datetime.datetime.utcnow() + + def check(self): + """ + Determine how long until the next scheduled time for a Task. + Returns the number of seconds until the next scheduled time or zero + if the task needs to be run immediately. + If it's an hourly task and it's never been run, run it now. + If it's a daily task and it's never been run and the hour is right, run it now. 
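+
+        For example (illustrative): for a daily task with hour='3' that last ran
+        yesterday, check() returns 0 at 03:00 GMT (run now) but 2*60*60 at
+        01:00 GMT (check again in two hours).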
+ """ + boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed)) + + if self.hourly and not self.last_executed: + return 0 + + if self.daily and not self.last_executed: + if int(self.hour) == self.now.hour: + return 0 + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + delta = self.now - self.last_executed + if self.hourly: + if delta.seconds >= 60*60: + return 0 + else: + return 60*60 - delta.seconds + else: + if int(self.hour) == self.now.hour: + if delta.days >= 1: + return 0 + else: + return 82800 # 23 hours, just to be safe + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + def _run(self, msg, vtimeout): + boto.log.info('Task[%s] - running:%s' % (self.name, self.command)) + log_fp = StringIO() + process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + nsecs = 5 + current_timeout = vtimeout + while process.poll() is None: + boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout)) + if nsecs >= current_timeout: + current_timeout += vtimeout + boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout)) + if msg: + msg.change_visibility(current_timeout) + time.sleep(5) + nsecs += 5 + t = process.communicate() + log_fp.write(t[0]) + log_fp.write(t[1]) + boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue())) + self.last_executed = self.now + self.last_status = process.returncode + self.last_output = log_fp.getvalue()[0:1023] + + def run(self, msg, vtimeout=60): + delay = self.check() + boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay)) + if delay == 0: + self._run(msg, vtimeout) + queue = msg.queue + new_msg = queue.new_message(self.id) + new_msg = queue.write(new_msg) + self.message_id = new_msg.id + self.put() + boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id)) + msg.delete() + boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id)) + else: + boto.log.info('new_vtimeout: %d' % delay) + msg.change_visibility(delay) + + def start(self, queue_name): + boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name)) + queue = boto.lookup('sqs', queue_name) + msg = queue.new_message(self.id) + msg = queue.write(msg) + self.message_id = msg.id + self.put() + boto.log.info('Task[%s] - start successful' % self.name) + +class TaskPoller(object): + + def __init__(self, queue_name): + self.sqs = boto.connect_sqs() + self.queue = self.sqs.lookup(queue_name) + + def poll(self, wait=60, vtimeout=60): + while True: + m = self.queue.read(vtimeout) + if m: + task = Task.get_by_id(m.get_body()) + if task: + if not task.message_id or m.id == task.message_id: + boto.log.info('Task[%s] - read message %s' % (task.name, m.id)) + task.run(m, vtimeout) + else: + boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name) + else: + time.sleep(wait) + + + + + + diff --git a/ext/boto/manage/test_manage.py b/ext/boto/manage/test_manage.py new file mode 100644 index 0000000000..a8c188c319 --- /dev/null +++ b/ext/boto/manage/test_manage.py @@ -0,0 +1,34 @@ +from boto.manage.server import Server +from boto.manage.volume import Volume +import time + +print('--> Creating New Volume') +volume = Volume.create() +print(volume) + +print('--> Creating New Server') +server_list = Server.create() +server = server_list[0] +print(server) + +print('----> Waiting for Server to start up') +while 
server.status != 'running': + print('*') + time.sleep(10) +print('----> Server is running') + +print('--> Run "df -k" on Server') +status = server.run('df -k') +print(status[1]) + +print('--> Now run volume.make_ready to make the volume ready to use on server') +volume.make_ready(server) + +print('--> Run "df -k" on Server') +status = server.run('df -k') +print(status[1]) + +print('--> Do an "ls -al" on the new filesystem') +status = server.run('ls -al %s' % volume.mount_point) +print(status[1]) + diff --git a/ext/boto/manage/volume.py b/ext/boto/manage/volume.py new file mode 100644 index 0000000000..410414c7b3 --- /dev/null +++ b/ext/boto/manage/volume.py @@ -0,0 +1,420 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
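+#
+# An illustrative sketch of the lifecycle this module implements (the server
+# variable is a hypothetical boto.manage.server.Server instance):
+#
+#   v = Volume.create()            # prompts for region, zone, name, size, device
+#   v.make_ready(server)           # attach, wait for the device, mkfs, mount
+#   v.snapshot()                   # freeze XFS, snapshot the EBS volume, unfreeze
+#   v.trim_snapshots(delete=True)  # apply the retention policy described below
+#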
+from __future__ import print_function + +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty +from boto.manage.server import Server +from boto.manage import propget +import boto.utils +import boto.ec2 +import time +import traceback +from contextlib import closing +import datetime + + +class CommandLineGetter(object): + + def get_region(self, params): + if not params.get('region', None): + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_size(self, params): + if not params.get('size', None): + prop = IntegerProperty(name='size', verbose_name='Size (GB)') + params['size'] = propget.get(prop) + + def get_mount_point(self, params): + if not params.get('mount_point', None): + prop = self.cls.find_property('mount_point') + params['mount_point'] = propget.get(prop) + + def get_device(self, params): + if not params.get('device', None): + prop = self.cls.find_property('device') + params['device'] = propget.get(prop) + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_zone(params) + self.get_name(params) + self.get_size(params) + self.get_mount_point(params) + self.get_device(params) + +class Volume(Model): + + name = StringProperty(required=True, unique=True, verbose_name='Name') + region_name = StringProperty(required=True, verbose_name='EC2 Region') + zone_name = StringProperty(required=True, verbose_name='EC2 Zone') + mount_point = StringProperty(verbose_name='Mount Point') + device = StringProperty(verbose_name="Device Name", default='/dev/sdp') + volume_id = StringProperty(required=True) + past_volume_ids = ListProperty(item_type=str) + server = ReferenceProperty(Server, collection_name='volumes', + verbose_name='Server Attached To') + volume_state = CalculatedProperty(verbose_name="Volume State", + calculated_type=str, use_method=True) + attachment_state = CalculatedProperty(verbose_name="Attachment State", + calculated_type=str, use_method=True) + size = CalculatedProperty(verbose_name="Size (GB)", + calculated_type=int, use_method=True) + + @classmethod + def create(cls, **params): + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + zone = params.get('zone') + size = params.get('size') + ebs_volume = ec2.create_volume(size, zone.name) + v = cls() + v.ec2 = ec2 + v.volume_id = ebs_volume.id + v.name = params.get('name') + v.mount_point = params.get('mount_point') + v.device = params.get('device') + v.region_name = region.name + v.zone_name = zone.name + v.put() + return v + + @classmethod + def create_from_volume_id(cls, region_name, volume_id, name): + vol = None + ec2 = boto.ec2.connect_to_region(region_name) + rs = ec2.get_all_volumes([volume_id]) + if len(rs) == 1: + v = rs[0] + vol = cls() + vol.volume_id = v.id + vol.name = name + vol.region_name = v.region.name + vol.zone_name = v.zone + vol.put() + return vol + + def create_from_latest_snapshot(self, name, size=None): + snapshot = self.get_snapshots()[-1] + return 
self.create_from_snapshot(name, snapshot, size) + + def create_from_snapshot(self, name, snapshot, size=None): + if size < self.size: + size = self.size + ec2 = self.get_ec2_connection() + if self.zone_name is None or self.zone_name == '': + # deal with the migration case where the zone is not set in the logical volume: + current_volume = ec2.get_all_volumes([self.volume_id])[0] + self.zone_name = current_volume.zone + ebs_volume = ec2.create_volume(size, self.zone_name, snapshot) + v = Volume() + v.ec2 = self.ec2 + v.volume_id = ebs_volume.id + v.name = name + v.mount_point = self.mount_point + v.device = self.device + v.region_name = self.region_name + v.zone_name = self.zone_name + v.put() + return v + + def get_ec2_connection(self): + if self.server: + return self.server.ec2 + if not hasattr(self, 'ec2') or self.ec2 is None: + self.ec2 = boto.ec2.connect_to_region(self.region_name) + return self.ec2 + + def _volume_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].volume_state() + + def _attachment_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].attachment_state() + + def _size(self): + if not hasattr(self, '__size'): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + self.__size = rs[0].size + return self.__size + + def install_xfs(self): + if self.server: + self.server.install('xfsprogs xfsdump') + + def get_snapshots(self): + """ + Returns a list of all completed snapshots for this volume ID. + """ + ec2 = self.get_ec2_connection() + rs = ec2.get_all_snapshots() + all_vols = [self.volume_id] + self.past_volume_ids + snaps = [] + for snapshot in rs: + if snapshot.volume_id in all_vols: + if snapshot.progress == '100%': + snapshot.date = boto.utils.parse_ts(snapshot.start_time) + snapshot.keep = True + snaps.append(snapshot) + snaps.sort(cmp=lambda x, y: cmp(x.date, y.date)) + return snaps + + def attach(self, server=None): + if self.attachment_state == 'attached': + print('already attached') + return None + if server: + self.server = server + self.put() + ec2 = self.get_ec2_connection() + ec2.attach_volume(self.volume_id, self.server.instance_id, self.device) + + def detach(self, force=False): + state = self.attachment_state + if state == 'available' or state is None or state == 'detaching': + print('already detached') + return None + ec2 = self.get_ec2_connection() + ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force) + self.server = None + self.put() + + def checkfs(self, use_cmd=None): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + # detemine state of file system on volume, only works if attached + if use_cmd: + cmd = use_cmd + else: + cmd = self.server.get_cmdshell() + status = cmd.run('xfs_check %s' % self.device) + if not use_cmd: + cmd.close() + if status[1].startswith('bad superblock magic number 0'): + return False + return True + + def wait(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + with closing(self.server.get_cmdshell()) as cmd: + # wait for the volume device to appear + cmd = self.server.get_cmdshell() + while not cmd.exists(self.device): + boto.log.info('%s still does not exist, waiting 10 seconds' % self.device) + time.sleep(10) + + def format(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + status = None + with 
closing(self.server.get_cmdshell()) as cmd: + if not self.checkfs(cmd): + boto.log.info('make_fs...') + status = cmd.run('mkfs -t xfs %s' % self.device) + return status + + def mount(self): + if self.server is None: + raise ValueError('server attribute must be set to run this command') + boto.log.info('handle_mount_point') + with closing(self.server.get_cmdshell()) as cmd: + cmd = self.server.get_cmdshell() + if not cmd.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + cmd.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + status = cmd.run('mount -l') + lines = status[1].split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + cmd.run('umount %s' % self.mount_point) + cmd.run('mount %s /tmp' % t[0]) + cmd.run('chmod 777 /tmp') + break + # Mount up our new EBS volume onto mount_point + cmd.run("mount %s %s" % (self.device, self.mount_point)) + cmd.run('xfs_growfs %s' % self.mount_point) + + def make_ready(self, server): + self.server = server + self.put() + self.install_xfs() + self.attach() + self.wait() + self.format() + self.mount() + + def freeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point) + + def unfreeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point) + + def snapshot(self): + # if this volume is attached to a server + # we need to freeze the XFS file system + try: + self.freeze() + if self.server is None: + snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) + else: + snapshot = self.server.ec2.create_snapshot(self.volume_id) + boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot)) + except Exception: + boto.log.info('Snapshot error') + boto.log.info(traceback.format_exc()) + finally: + status = self.unfreeze() + return status + + def get_snapshot_range(self, snaps, start_date=None, end_date=None): + l = [] + for snap in snaps: + if start_date and end_date: + if snap.date >= start_date and snap.date <= end_date: + l.append(snap) + elif start_date: + if snap.date >= start_date: + l.append(snap) + elif end_date: + if snap.date <= end_date: + l.append(snap) + else: + l.append(snap) + return l + + def trim_snapshots(self, delete=False): + """ + Trim the number of snapshots for this volume. This method always + keeps the oldest snapshot. It then uses the parameters passed in + to determine how many others should be kept. + + The algorithm is to keep all snapshots from the current day. Then + it will keep the first snapshot of the day for the previous seven days. + Then, it will keep the first snapshot of the week for the previous + four weeks. After than, it will keep the first snapshot of the month + for as many months as there are. 
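+
+        For example (an illustrative timeline): with one snapshot per day for
+        the past 90 days, this keeps all of today's snapshots, one per day for
+        the previous week, one per week for the previous four weeks, one per
+        month before that, and always the oldest and newest snapshots.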
+ + """ + snaps = self.get_snapshots() + # Always keep the oldest and the newest + if len(snaps) <= 2: + return snaps + snaps = snaps[1:-1] + now = datetime.datetime.now(snaps[0].date.tzinfo) + midnight = datetime.datetime(year=now.year, month=now.month, + day=now.day, tzinfo=now.tzinfo) + # Keep the first snapshot from each day of the previous week + one_week = datetime.timedelta(days=7, seconds=60*60) + print(midnight-one_week, midnight) + previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight) + print(previous_week) + if not previous_week: + return snaps + current_day = None + for snap in previous_week: + if current_day and current_day == snap.date.day: + snap.keep = False + else: + current_day = snap.date.day + # Get ourselves onto the next full week boundary + if previous_week: + week_boundary = previous_week[0].date + if week_boundary.weekday() != 0: + delta = datetime.timedelta(days=week_boundary.weekday()) + week_boundary = week_boundary - delta + # Keep one within this partial week + partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date) + if len(partial_week) > 1: + for snap in partial_week[1:]: + snap.keep = False + # Keep the first snapshot of each week for the previous 4 weeks + for i in range(0, 4): + weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary) + if len(weeks_worth) > 1: + for snap in weeks_worth[1:]: + snap.keep = False + week_boundary = week_boundary - one_week + # Now look through all remaining snaps and keep one per month + remainder = self.get_snapshot_range(snaps, end_date=week_boundary) + current_month = None + for snap in remainder: + if current_month and current_month == snap.date.month: + snap.keep = False + else: + current_month = snap.date.month + if delete: + for snap in snaps: + if not snap.keep: + boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name)) + snap.delete() + return snaps + + def grow(self, size): + pass + + def copy(self, snapshot): + pass + + def get_snapshot_from_date(self, date): + pass + + def delete(self, delete_ebs_volume=False): + if delete_ebs_volume: + self.detach() + ec2 = self.get_ec2_connection() + ec2.delete_volume(self.volume_id) + super(Volume, self).delete() + + def archive(self): + # snapshot volume, trim snaps, delete volume-id + pass + + diff --git a/ext/boto/mashups/__init__.py b/ext/boto/mashups/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/ext/boto/mashups/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/ext/boto/mashups/interactive.py b/ext/boto/mashups/interactive.py new file mode 100644 index 0000000000..1eb9db47d5 --- /dev/null +++ b/ext/boto/mashups/interactive.py @@ -0,0 +1,97 @@ +# Copyright (C) 2003-2007 Robey Pointer +# +# This file is part of paramiko. +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +from __future__ import print_function + +import socket +import sys + +# windows does not have termios... +try: + import termios + import tty + has_termios = True +except ImportError: + has_termios = False + + +def interactive_shell(chan): + if has_termios: + posix_shell(chan) + else: + windows_shell(chan) + + +def posix_shell(chan): + import select + + oldtty = termios.tcgetattr(sys.stdin) + try: + tty.setraw(sys.stdin.fileno()) + tty.setcbreak(sys.stdin.fileno()) + chan.settimeout(0.0) + + while True: + r, w, e = select.select([chan, sys.stdin], [], []) + if chan in r: + try: + x = chan.recv(1024) + if len(x) == 0: + print('\r\n*** EOF\r\n', end=' ') + break + sys.stdout.write(x) + sys.stdout.flush() + except socket.timeout: + pass + if sys.stdin in r: + x = sys.stdin.read(1) + if len(x) == 0: + break + chan.send(x) + + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) + + +# thanks to Mike Looijmans for this code +def windows_shell(chan): + import threading + + sys.stdout.write("Line-buffered terminal emulation. 
Press F6 or ^Z to send EOF.\r\n\r\n") + + def writeall(sock): + while True: + data = sock.recv(256) + if not data: + sys.stdout.write('\r\n*** EOF ***\r\n\r\n') + sys.stdout.flush() + break + sys.stdout.write(data) + sys.stdout.flush() + + writer = threading.Thread(target=writeall, args=(chan,)) + writer.start() + + try: + while True: + d = sys.stdin.read(1) + if not d: + break + chan.send(d) + except EOFError: + # user hit ^Z or F6 + pass diff --git a/ext/boto/mashups/iobject.py b/ext/boto/mashups/iobject.py new file mode 100644 index 0000000000..f6ae98a34a --- /dev/null +++ b/ext/boto/mashups/iobject.py @@ -0,0 +1,114 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import os + +def int_val_fn(v): + try: + int(v) + return True + except: + return False + +class IObject(object): + + def choose_from_list(self, item_list, search_str='', + prompt='Enter Selection'): + if not item_list: + print('No Choices Available') + return + choice = None + while not choice: + n = 1 + choices = [] + for item in item_list: + if isinstance(item, basestring): + print('[%d] %s' % (n, item)) + choices.append(item) + n += 1 + else: + obj, id, desc = item + if desc: + if desc.find(search_str) >= 0: + print('[%d] %s - %s' % (n, id, desc)) + choices.append(obj) + n += 1 + else: + if id.find(search_str) >= 0: + print('[%d] %s' % (n, id)) + choices.append(obj) + n += 1 + if choices: + val = raw_input('%s[1-%d]: ' % (prompt, len(choices))) + if val.startswith('/'): + search_str = val[1:] + else: + try: + int_val = int(val) + if int_val == 0: + return None + choice = choices[int_val-1] + except ValueError: + print('%s is not a valid choice' % val) + except IndexError: + print('%s is not within the range[1-%d]' % (val, + len(choices))) + else: + print("No objects matched your pattern") + search_str = '' + return choice + + def get_string(self, prompt, validation_fn=None): + okay = False + while not okay: + val = raw_input('%s: ' % prompt) + if validation_fn: + okay = validation_fn(val) + if not okay: + print('Invalid value: %s' % val) + else: + okay = True + return val + + def get_filename(self, prompt): + okay = False + val = '' + while not okay: + val = raw_input('%s: %s' % (prompt, val)) + val = os.path.expanduser(val) + if os.path.isfile(val): + okay = True + elif os.path.isdir(val): + path = val + val = self.choose_from_list(os.listdir(path)) + if val: + val = os.path.join(path, val) + okay = True + else: + val = '' + else: + print('Invalid value: %s' % val) 
+ val = '' + return val + + def get_int(self, prompt): + s = self.get_string(prompt, int_val_fn) + return int(s) + diff --git a/ext/boto/mashups/order.py b/ext/boto/mashups/order.py new file mode 100644 index 0000000000..4aaec307bd --- /dev/null +++ b/ext/boto/mashups/order.py @@ -0,0 +1,211 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +High-level abstraction of an EC2 order for servers +""" + +import boto +import boto.ec2 +from boto.mashups.server import Server, ServerSet +from boto.mashups.iobject import IObject +from boto.pyami.config import Config +from boto.sdb.persist import get_domain, set_domain +import time +from boto.compat import StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge'] + +class Item(IObject): + + def __init__(self): + self.region = None + self.name = None + self.instance_type = None + self.quantity = 0 + self.zone = None + self.ami = None + self.groups = [] + self.key = None + self.ec2 = None + self.config = None + + def set_userdata(self, key, value): + self.userdata[key] = value + + def get_userdata(self, key): + return self.userdata[key] + + def set_region(self, region=None): + if region: + self.region = region + else: + l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()] + self.region = self.choose_from_list(l, prompt='Choose Region') + + def set_name(self, name=None): + if name: + self.name = name + else: + self.name = self.get_string('Name') + + def set_instance_type(self, instance_type=None): + if instance_type: + self.instance_type = instance_type + else: + self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type') + + def set_quantity(self, n=0): + if n > 0: + self.quantity = n + else: + self.quantity = self.get_int('Quantity') + + def set_zone(self, zone=None): + if zone: + self.zone = zone + else: + l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()] + self.zone = self.choose_from_list(l, prompt='Choose Availability Zone') + + def set_ami(self, ami=None): + if ami: + self.ami = ami + else: + l = [(a, a.id, a.location) for a in self.ec2.get_all_images()] + self.ami = self.choose_from_list(l, prompt='Choose AMI') + + def add_group(self, group=None): + if group: + self.groups.append(group) + else: + l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()] + self.groups.append(self.choose_from_list(l, prompt='Choose Security Group')) + + def set_key(self, key=None): + if key: + self.key = key + else: + l = 
[(k, k.name, '') for k in self.ec2.get_all_key_pairs()] + self.key = self.choose_from_list(l, prompt='Choose Keypair') + + def update_config(self): + if not self.config.has_section('Credentials'): + self.config.add_section('Credentials') + self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id) + self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key) + if not self.config.has_section('Pyami'): + self.config.add_section('Pyami') + sdb_domain = get_domain() + if sdb_domain: + self.config.set('Pyami', 'server_sdb_domain', sdb_domain) + self.config.set('Pyami', 'server_sdb_name', self.name) + + def set_config(self, config_path=None): + if not config_path: + config_path = self.get_filename('Specify Config file') + self.config = Config(path=config_path) + + def get_userdata_string(self): + s = StringIO() + self.config.write(s) + return s.getvalue() + + def enter(self, **params): + self.region = params.get('region', self.region) + if not self.region: + self.set_region() + self.ec2 = self.region.connect() + self.name = params.get('name', self.name) + if not self.name: + self.set_name() + self.instance_type = params.get('instance_type', self.instance_type) + if not self.instance_type: + self.set_instance_type() + self.zone = params.get('zone', self.zone) + if not self.zone: + self.set_zone() + self.quantity = params.get('quantity', self.quantity) + if not self.quantity: + self.set_quantity() + self.ami = params.get('ami', self.ami) + if not self.ami: + self.set_ami() + self.groups = params.get('groups', self.groups) + if not self.groups: + self.add_group() + self.key = params.get('key', self.key) + if not self.key: + self.set_key() + self.config = params.get('config', self.config) + if not self.config: + self.set_config() + self.update_config() + +class Order(IObject): + + def __init__(self): + self.items = [] + self.reservation = None + + def add_item(self, **params): + item = Item() + item.enter(**params) + self.items.append(item) + + def display(self): + print('This Order consists of the following items') + print() + print('QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair') + for item in self.items: + print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type, + item.ami.id, item.groups, item.key.name)) + + def place(self, block=True): + if get_domain() is None: + print('SDB Persistence Domain not set') + domain_name = self.get_string('Specify SDB Domain') + set_domain(domain_name) + s = ServerSet() + for item in self.items: + r = item.ami.run(min_count=1, max_count=item.quantity, + key_name=item.key.name, user_data=item.get_userdata_string(), + security_groups=item.groups, instance_type=item.instance_type, + placement=item.zone.name) + if block: + states = [i.state for i in r.instances] + if states.count('running') != len(states): + print(states) + time.sleep(15) + states = [i.update() for i in r.instances] + for i in r.instances: + server = Server() + server.name = item.name + server.instance_id = i.id + server.reservation = r + server.save() + s.append(server) + if len(s) == 1: + return s[0] + else: + return s + + + diff --git a/ext/boto/mashups/server.py b/ext/boto/mashups/server.py new file mode 100644 index 0000000000..7045e7f4da --- /dev/null +++ b/ext/boto/mashups/server.py @@ -0,0 +1,395 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in 
the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +High-level abstraction of an EC2 server +""" + +import boto +import boto.utils +from boto.compat import StringIO +from boto.mashups.iobject import IObject +from boto.pyami.config import Config, BotoConfigPath +from boto.mashups.interactive import interactive_shell +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty +import os + + +class ServerSet(list): + + def __getattr__(self, name): + results = [] + is_callable = False + for server in self: + try: + val = getattr(server, name) + if callable(val): + is_callable = True + results.append(val) + except: + results.append(None) + if is_callable: + self.map_list = results + return self.map + return results + + def map(self, *args): + results = [] + for fn in self.map_list: + results.append(fn(*args)) + return results + +class Server(Model): + + @property + def ec2(self): + if self._ec2 is None: + self._ec2 = boto.connect_ec2() + return self._ec2 + + @classmethod + def Inventory(cls): + """ + Returns a list of Server instances, one for each Server object + persisted in the db + """ + l = ServerSet() + rs = cls.find() + for server in rs: + l.append(server) + return l + + @classmethod + def Register(cls, name, instance_id, description=''): + s = cls() + s.name = name + s.instance_id = instance_id + s.description = description + s.save() + return s + + def __init__(self, id=None, **kw): + super(Server, self).__init__(id, **kw) + self._reservation = None + self._instance = None + self._ssh_client = None + self._pkey = None + self._config = None + self._ec2 = None + + name = StringProperty(unique=True, verbose_name="Name") + instance_id = StringProperty(verbose_name="Instance ID") + config_uri = StringProperty() + ami_id = StringProperty(verbose_name="AMI ID") + zone = StringProperty(verbose_name="Availability Zone") + security_group = StringProperty(verbose_name="Security Group", default="default") + key_name = StringProperty(verbose_name="Key Name") + elastic_ip = StringProperty(verbose_name="Elastic IP") + instance_type = StringProperty(verbose_name="Instance Type") + description = StringProperty(verbose_name="Description") + log = StringProperty() + + def setReadOnly(self, value): + raise AttributeError + + def getInstance(self): + if not self._instance: + if self.instance_id: + try: + rs = self.ec2.get_all_reservations([self.instance_id]) + except: + return None + if len(rs) > 0: + self._reservation = rs[0] + self._instance = self._reservation.instances[0] + return self._instance + + instance = property(getInstance, setReadOnly, None, 'The Instance for the server') + + def getAMI(self): + if self.instance: + return 
self.instance.image_id + + ami = property(getAMI, setReadOnly, None, 'The AMI for the server') + + def getStatus(self): + if self.instance: + self.instance.update() + return self.instance.state + + status = property(getStatus, setReadOnly, None, + 'The status of the server') + + def getHostname(self): + if self.instance: + return self.instance.public_dns_name + + hostname = property(getHostname, setReadOnly, None, + 'The public DNS name of the server') + + def getPrivateHostname(self): + if self.instance: + return self.instance.private_dns_name + + private_hostname = property(getPrivateHostname, setReadOnly, None, + 'The private DNS name of the server') + + def getLaunchTime(self): + if self.instance: + return self.instance.launch_time + + launch_time = property(getLaunchTime, setReadOnly, None, + 'The time the Server was started') + + def getConsoleOutput(self): + if self.instance: + return self.instance.get_console_output() + + console_output = property(getConsoleOutput, setReadOnly, None, + 'Retrieve the console output for server') + + def getGroups(self): + if self._reservation: + return self._reservation.groups + else: + return None + + groups = property(getGroups, setReadOnly, None, + 'The Security Groups controlling access to this server') + + def getConfig(self): + if not self._config: + remote_file = BotoConfigPath + local_file = '%s.ini' % self.instance.id + self.get_file(remote_file, local_file) + self._config = Config(local_file) + return self._config + + def setConfig(self, config): + local_file = '%s.ini' % self.instance.id + fp = open(local_file) + config.write(fp) + fp.close() + self.put_file(local_file, BotoConfigPath) + self._config = config + + config = property(getConfig, setConfig, None, + 'The instance data for this server') + + def set_config(self, config): + """ + Set SDB based config + """ + self._config = config + self._config.dump_to_sdb("botoConfigs", self.id) + + def load_config(self): + self._config = Config(do_load=False) + self._config.load_from_sdb("botoConfigs", self.id) + + def stop(self): + if self.instance: + self.instance.stop() + + def start(self): + self.stop() + ec2 = boto.connect_ec2() + ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0] + groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)]) + if not self._config: + self.load_config() + if not self._config.has_section("Credentials"): + self._config.add_section("Credentials") + self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id) + self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key) + + if not self._config.has_section("Pyami"): + self._config.add_section("Pyami") + + if self._manager.domain: + self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name) + self._config.set("Pyami", 'server_sdb_name', self.name) + + cfg = StringIO() + self._config.write(cfg) + cfg = cfg.getvalue() + r = ami.run(min_count=1, + max_count=1, + key_name=self.key_name, + security_groups = groups, + instance_type = self.instance_type, + placement = self.zone, + user_data = cfg) + i = r.instances[0] + self.instance_id = i.id + self.put() + if self.elastic_ip: + ec2.associate_address(self.instance_id, self.elastic_ip) + + def reboot(self): + if self.instance: + self.instance.reboot() + + def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts', + uname='root'): + import paramiko + if not self.instance: + print('No instance yet!') + return + if not self._ssh_client: + if not key_file: + iobject = IObject() + 
key_file = iobject.get_filename('Path to OpenSSH Key file') + self._pkey = paramiko.RSAKey.from_private_key_file(key_file) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self._ssh_client.connect(self.instance.public_dns_name, + username=uname, pkey=self._pkey) + return self._ssh_client + + def get_file(self, remotepath, localpath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.get(remotepath, localpath) + + def put_file(self, localpath, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.put(localpath, remotepath) + + def listdir(self, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + return sftp_client.listdir(remotepath) + + def shell(self, key_file=None): + ssh_client = self.get_ssh_client(key_file) + channel = ssh_client.invoke_shell() + interactive_shell(channel) + + def bundle_image(self, prefix, key_file, cert_file, size): + print('bundling image...') + print('\tcopying cert and pk over to /mnt directory on server') + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + path, name = os.path.split(key_file) + remote_key_file = '/mnt/%s' % name + self.put_file(key_file, remote_key_file) + path, name = os.path.split(cert_file) + remote_cert_file = '/mnt/%s' % name + self.put_file(cert_file, remote_cert_file) + print('\tdeleting %s' % BotoConfigPath) + # delete the metadata.ini file if it exists + try: + sftp_client.remove(BotoConfigPath) + except: + pass + command = 'sudo ec2-bundle-vol ' + command += '-c %s -k %s ' % (remote_cert_file, remote_key_file) + command += '-u %s ' % self._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + print('\t%s' % command) + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') + + def upload_bundle(self, bucket, prefix): + print('uploading bundle...') + command = 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.ec2.aws_access_key_id + command += '-s %s ' % self.ec2.aws_secret_access_key + print('\t%s' % command) + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') + + def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + self.bundle_image(prefix, key_file, cert_file, size) + self.upload_bundle(bucket, prefix) + print('registering image...') + self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + + def attach_volume(self, volume, 
device="/dev/sdp"): + """ + Attach an EBS volume to this server + + :param volume: EBS Volume to attach + :type volume: boto.ec2.volume.Volume + + :param device: Device to attach to (default to /dev/sdp) + :type device: string + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device) + + def detach_volume(self, volume): + """ + Detach an EBS volume from this server + + :param volume: EBS Volume to detach + :type volume: boto.ec2.volume.Volume + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id) + + def install_package(self, package_name): + print('installing %s...' % package_name) + command = 'yum -y install %s' % package_name + print('\t%s' % command) + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print('\t%s' % response) + print('\t%s' % t[2].read()) + print('...complete!') diff --git a/ext/boto/mturk/__init__.py b/ext/boto/mturk/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/ext/boto/mturk/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/ext/boto/mturk/connection.py b/ext/boto/mturk/connection.py new file mode 100644 index 0000000000..5ff4216be1 --- /dev/null +++ b/ext/boto/mturk/connection.py @@ -0,0 +1,1052 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import xml.sax +import datetime +import itertools + +from boto import handler +from boto import config +from boto.mturk.price import Price +import boto.mturk.notification +from boto.connection import AWSQueryConnection +from boto.exception import EC2ResponseError +from boto.resultset import ResultSet +from boto.mturk.question import QuestionForm, ExternalQuestion, HTMLQuestion + + +class MTurkRequestError(EC2ResponseError): + "Error for MTurk Requests" + # todo: subclass from an abstract parent of EC2ResponseError + + +class MTurkConnection(AWSQueryConnection): + + APIVersion = '2014-08-15' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=None, debug=0, + https_connection_factory=None, security_token=None, + profile_name=None): + if not host: + if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True': + host = 'mechanicalturk.sandbox.amazonaws.com' + else: + host = 'mechanicalturk.amazonaws.com' + self.debug = debug + + super(MTurkConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, host, debug, + https_connection_factory, + security_token=security_token, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['mturk'] + + def get_account_balance(self): + """ + """ + params = {} + return self._process_request('GetAccountBalance', params, + [('AvailableBalance', Price), + ('OnHoldBalance', Price)]) + + def register_hit_type(self, title, description, reward, duration, + keywords=None, approval_delay=None, qual_req=None): + """ + Register a new HIT Type + title, description are strings + reward is a Price object + duration can be a timedelta, or an object castable to an int + """ + params = dict( + Title=title, + Description=description, + AssignmentDurationInSeconds=self.duration_as_seconds(duration), + ) + params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward')) + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + if approval_delay is not None: + d = self.duration_as_seconds(approval_delay) + params['AutoApprovalDelayInSeconds'] = d + + if qual_req is not None: + params.update(qual_req.get_as_params()) + + return self._process_request('RegisterHITType', params, + [('HITTypeId', HITTypeId)]) + + def set_email_notification(self, hit_type, email, event_types=None): + """ + Performs a SetHITTypeNotification operation to set email + notification for a specified HIT type + """ + return self._set_notification(hit_type, 'Email', email, + 'SetHITTypeNotification', event_types) + + def set_rest_notification(self, hit_type, url, event_types=None): + """ + Performs a SetHITTypeNotification operation to set REST notification + for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, + 'SetHITTypeNotification', event_types) + + def set_sqs_notification(self, hit_type, queue_url, event_types=None): + """ + Performs a SetHITTypeNotification operation so set SQS notification + for a specified HIT type. 
Queue URL is of form:
+        https://queue.amazonaws.com/<CUSTOMER_ID>/<QUEUE_NAME> and can be
+        found when looking at the details for a Queue in the AWS Console
+        """
+        return self._set_notification(hit_type, "SQS", queue_url,
+                                      'SetHITTypeNotification', event_types)
+
+    def send_test_event_notification(self, hit_type, url,
+                                     event_types=None,
+                                     test_event_type='Ping'):
+        """
+        Performs a SendTestEventNotification operation with REST notification
+        for a specified HIT type
+        """
+        return self._set_notification(hit_type, 'REST', url,
+                                      'SendTestEventNotification',
+                                      event_types, test_event_type)
+
+    def _set_notification(self, hit_type, transport,
+                          destination, request_type,
+                          event_types=None, test_event_type=None):
+        """
+        Common operation to set notification or send a test event
+        notification for a specified HIT type
+        """
+        params = {'HITTypeId': hit_type}
+
+        # from the Developer Guide:
+        # The 'Active' parameter is optional. If omitted, the active status of
+        # the HIT type's notification specification is unchanged. All HIT types
+        # begin with their notification specifications in the "inactive" status.
+        notification_params = {'Destination': destination,
+                               'Transport': transport,
+                               'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION,
+                               'Active': True,
+                               }
+
+        # add specific event types if required
+        if event_types:
+            self.build_list_params(notification_params, event_types,
+                                   'EventType')
+
+        # Set up dict of 'Notification.1.Transport' etc. values
+        notification_rest_params = {}
+        num = 1
+        for key in notification_params:
+            notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key]
+
+        # Update main params dict
+        params.update(notification_rest_params)
+
+        # If test notification, specify the notification type to be tested
+        if test_event_type:
+            params.update({'TestEventType': test_event_type})
+
+        # Execute operation
+        return self._process_request(request_type, params)
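For reviewers, a minimal sketch of how the notification setters above are driven. It is not part of the vendored file; `conn` is assumed to be an MTurkConnection instance, and the HIT type ID and endpoint URL are placeholders.

    # Sketch only -- not part of ext/boto. `conn` and the literals are
    # placeholders for illustration.
    conn.set_rest_notification('HIT_TYPE_ID',
                               url='https://example.com/mturk-events',
                               event_types=['AssignmentSubmitted'])
    # send_test_event_notification() fires a 'Ping' event by default,
    # a cheap way to confirm the receiver is reachable.
    conn.send_test_event_notification('HIT_TYPE_ID',
                                      url='https://example.com/mturk-events')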
+
+    def create_hit(self, hit_type=None, question=None, hit_layout=None,
+                   lifetime=datetime.timedelta(days=7),
+                   max_assignments=1,
+                   title=None, description=None, keywords=None,
+                   reward=None, duration=datetime.timedelta(days=7),
+                   approval_delay=None, annotation=None,
+                   questions=None, qualifications=None,
+                   layout_params=None, response_groups=None):
+        """
+        Creates a new HIT.
+        Returns a ResultSet
+        See: http://docs.amazonwebservices.com/AWSMechTurk/2012-03-25/AWSMturkAPI/ApiReference_CreateHITOperation.html
+        """
+
+        # Handle basic required arguments and set up params dict
+        params = {'LifetimeInSeconds':
+                  self.duration_as_seconds(lifetime),
+                  'MaxAssignments': max_assignments,
+                  }
+
+        # handle single or multiple questions or layouts
+        neither = question is None and questions is None
+        if hit_layout is None:
+            both = question is not None and questions is not None
+            if neither or both:
+                raise ValueError("Must specify question (single Question instance) or questions (list or QuestionForm instance), but not both")
+            if question:
+                questions = [question]
+            question_param = QuestionForm(questions)
+            if isinstance(question, QuestionForm):
+                question_param = question
+            elif isinstance(question, ExternalQuestion):
+                question_param = question
+            elif isinstance(question, HTMLQuestion):
+                question_param = question
+            params['Question'] = question_param.get_as_xml()
+        else:
+            if not neither:
+                raise ValueError("Must not specify question (single Question instance) or questions (list or QuestionForm instance) when specifying hit_layout")
+            params['HITLayoutId'] = hit_layout
+            if layout_params:
+                params.update(layout_params.get_as_params())
+
+        # if hit type specified then add it
+        # else add the additional required parameters
+        if hit_type:
+            params['HITTypeId'] = hit_type
+        else:
+            # Handle keywords
+            final_keywords = MTurkConnection.get_keywords_as_string(keywords)
+
+            # Handle price argument
+            final_price = MTurkConnection.get_price_as_price(reward)
+
+            final_duration = self.duration_as_seconds(duration)
+
+            additional_params = dict(
+                Title=title,
+                Description=description,
+                Keywords=final_keywords,
+                AssignmentDurationInSeconds=final_duration,
+            )
+            additional_params.update(final_price.get_as_params('Reward'))
+
+            if approval_delay is not None:
+                d = self.duration_as_seconds(approval_delay)
+                additional_params['AutoApprovalDelayInSeconds'] = d
+
+            # add these params to the others
+            params.update(additional_params)
+
+        # add the annotation if specified
+        if annotation is not None:
+            params['RequesterAnnotation'] = annotation
+
+        # Add the Qualifications if specified
+        if qualifications is not None:
+            params.update(qualifications.get_as_params())
+
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        # Submit
+        return self._process_request('CreateHIT', params, [('HIT', HIT)])
+
+    def change_hit_type_of_hit(self, hit_id, hit_type):
+        """
+        Change the HIT type of an existing HIT. Note that the reward associated
+        with the new HIT type must match the reward of the current HIT type in
+        order for the operation to be valid.
+
+        :type hit_id: str
+        :type hit_type: str
+        """
+        params = {'HITId': hit_id,
+                  'HITTypeId': hit_type}
+
+        return self._process_request('ChangeHITTypeOfHIT', params)
+
+    def get_reviewable_hits(self, hit_type=None, status='Reviewable',
+                            sort_by='Expiration', sort_direction='Ascending',
+                            page_size=10, page_number=1):
+        """
+        Retrieve the HITs that have a status of Reviewable, or HITs that
+        have a status of Reviewing, and that belong to the Requester
+        calling the operation.
+ """ + params = {'Status': status, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + # Handle optional hit_type argument + if hit_type is not None: + params.update({'HITTypeId': hit_type}) + + return self._process_request('GetReviewableHITs', params, + [('HIT', HIT)]) + + @staticmethod + def _get_pages(page_size, total_records): + """ + Given a page size (records per page) and a total number of + records, return the page numbers to be retrieved. + """ + pages = total_records / page_size + bool(total_records % page_size) + return list(range(1, pages + 1)) + + def get_all_hits(self): + """ + Return all of a Requester's HITs + + Despite what search_hits says, it does not return all hits, but + instead returns a page of hits. This method will pull the hits + from the server 100 at a time, but will yield the results + iteratively, so subsequent requests are made on demand. + """ + page_size = 100 + search_rs = self.search_hits(page_size=page_size) + total_records = int(search_rs.TotalNumResults) + get_page_hits = lambda page: self.search_hits(page_size=page_size, page_number=page) + page_nums = self._get_pages(page_size, total_records) + hit_sets = itertools.imap(get_page_hits, page_nums) + return itertools.chain.from_iterable(hit_sets) + + def search_hits(self, sort_by='CreationTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Return a page of a Requester's HITs, on behalf of the Requester. + The operation returns HITs of any status, except for HITs that + have been disposed with the DisposeHIT operation. + Note: + The SearchHITs operation does not accept any search parameters + that filter the results. + """ + params = {'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('SearchHITs', params, [('HIT', HIT)]) + + def get_assignment(self, assignment_id, response_groups=None): + """ + Retrieves an assignment using the assignment's ID. Requesters can only + retrieve their own assignments, and only assignments whose related HIT + has not been disposed. + + The returned ResultSet will have the following attributes: + + Request + This element is present only if the Request ResponseGroup + is specified. + Assignment + The assignment. The response includes one Assignment object. + HIT + The HIT associated with this assignment. The response + includes one HIT object. + + """ + + params = {'AssignmentId': assignment_id} + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignment', params, + [('Assignment', Assignment), + ('HIT', HIT)]) + + def get_assignments(self, hit_id, status=None, + sort_by='SubmitTime', sort_direction='Ascending', + page_size=10, page_number=1, response_groups=None): + """ + Retrieves completed assignments for a HIT. + Use this operation to retrieve the results for a HIT. + + The returned ResultSet will have the following attributes: + + NumResults + The number of assignments on the page in the filtered results + list, equivalent to the number of assignments being returned + by this call. + A non-negative integer, as a string. + PageNumber + The number of the page in the filtered results list being + returned. 
+ A positive integer, as a string. + TotalNumResults + The total number of HITs in the filtered results list based + on this call. + A non-negative integer, as a string. + + The ResultSet will contain zero or more Assignment objects + + """ + params = {'HITId': hit_id, + 'SortProperty': sort_by, + 'SortDirection': sort_direction, + 'PageSize': page_size, + 'PageNumber': page_number} + + if status is not None: + params['AssignmentStatus'] = status + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetAssignmentsForHIT', params, + [('Assignment', Assignment)]) + + def approve_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveAssignment', params) + + def reject_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('RejectAssignment', params) + + def approve_rejected_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId': assignment_id} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveRejectedAssignment', params) + + def get_file_upload_url(self, assignment_id, question_identifier): + """ + Generates and returns a temporary URL to an uploaded file. The + temporary URL is used to retrieve the file as an answer to a + FileUploadAnswer question, it is valid for 60 seconds. + + Will have a FileUploadURL attribute as per the API Reference. + """ + + params = {'AssignmentId': assignment_id, + 'QuestionIdentifier': question_identifier} + + return self._process_request('GetFileUploadURL', params, + [('FileUploadURL', FileUploadURL)]) + + def get_hit(self, hit_id, response_groups=None): + """ + """ + params = {'HITId': hit_id} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('GetHIT', params, [('HIT', HIT)]) + + def set_reviewing(self, hit_id, revert=None): + """ + Update a HIT with a status of Reviewable to have a status of Reviewing, + or reverts a Reviewing HIT back to the Reviewable status. + + Only HITs with a status of Reviewable can be updated with a status of + Reviewing. Similarly, only Reviewing HITs can be reverted back to a + status of Reviewable. + """ + params = {'HITId': hit_id} + if revert: + params['Revert'] = revert + return self._process_request('SetHITAsReviewing', params) + + def disable_hit(self, hit_id, response_groups=None): + """ + Remove a HIT from the Mechanical Turk marketplace, approves all + submitted assignments that have not already been approved or rejected, + and disposes of the HIT and all assignment data. + + Assignments for the HIT that have already been submitted, but not yet + approved or rejected, will be automatically approved. Assignments in + progress at the time of the call to DisableHIT will be approved once + the assignments are submitted. You will be charged for approval of + these assignments. DisableHIT completely disposes of the HIT and + all submitted assignment data. Assignment results data cannot be + retrieved for a HIT that has been disposed. + + It is not possible to re-enable a HIT once it has been disabled. 
+ To make the work from a disabled HIT available again, create a new HIT. + """ + params = {'HITId': hit_id} + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + return self._process_request('DisableHIT', params) + + def dispose_hit(self, hit_id): + """ + Dispose of a HIT that is no longer needed. + + Only HITs in the "reviewable" state, with all submitted + assignments approved or rejected, can be disposed. A Requester + can call GetReviewableHITs to determine which HITs are + reviewable, then call GetAssignmentsForHIT to retrieve the + assignments. Disposing of a HIT removes the HIT from the + results of a call to GetReviewableHITs. """ + params = {'HITId': hit_id} + return self._process_request('DisposeHIT', params) + + def expire_hit(self, hit_id): + + """ + Expire a HIT that is no longer needed. + + The effect is identical to the HIT expiring on its own. The + HIT no longer appears on the Mechanical Turk web site, and no + new Workers are allowed to accept the HIT. Workers who have + accepted the HIT prior to expiration are allowed to complete + it or return it, or allow the assignment duration to elapse + (abandon the HIT). Once all remaining assignments have been + submitted, the expired HIT becomes"reviewable", and will be + returned by a call to GetReviewableHITs. + """ + params = {'HITId': hit_id} + return self._process_request('ForceExpireHIT', params) + + def extend_hit(self, hit_id, assignments_increment=None, + expiration_increment=None): + """ + Increase the maximum number of assignments, or extend the + expiration date, of an existing HIT. + + NOTE: If a HIT has a status of Reviewable and the HIT is + extended to make it Available, the HIT will not be returned by + GetReviewableHITs, and its submitted assignments will not be + returned by GetAssignmentsForHIT, until the HIT is Reviewable + again. Assignment auto-approval will still happen on its + original schedule, even if the HIT has been extended. Be sure + to retrieve and approve (or reject) submitted assignments + before extending the HIT, if so desired. + """ + # must provide assignment *or* expiration increment + if (assignments_increment is None and expiration_increment is None) or \ + (assignments_increment is not None and expiration_increment is not None): + raise ValueError("Must specify either assignments_increment or expiration_increment, but not both") + + params = {'HITId': hit_id} + if assignments_increment: + params['MaxAssignmentsIncrement'] = assignments_increment + if expiration_increment: + params['ExpirationIncrementInSeconds'] = expiration_increment + + return self._process_request('ExtendHIT', params) + + def get_help(self, about, help_type='Operation'): + """ + Return information about the Mechanical Turk Service + operations and response group NOTE - this is basically useless + as it just returns the URL of the documentation + + help_type: either 'Operation' or 'ResponseGroup' + """ + params = {'About': about, 'HelpType': help_type} + return self._process_request('Help', params) + + def grant_bonus(self, worker_id, assignment_id, bonus_price, reason): + """ + Issues a payment of money from your account to a Worker. To + be eligible for a bonus, the Worker must have submitted + results for one of your HITs, and have had those results + approved or rejected. This payment happens separately from the + reward you pay to the Worker when you approve the Worker's + assignment. 
The Bonus must be passed in as an instance of the + Price object. + """ + params = bonus_price.get_as_params('BonusAmount', 1) + params['WorkerId'] = worker_id + params['AssignmentId'] = assignment_id + params['Reason'] = reason + + return self._process_request('GrantBonus', params) + + def block_worker(self, worker_id, reason): + """ + Block a worker from working on my tasks. + """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('BlockWorker', params) + + def unblock_worker(self, worker_id, reason): + """ + Unblock a worker from working on my tasks. + """ + params = {'WorkerId': worker_id, 'Reason': reason} + + return self._process_request('UnblockWorker', params) + + def notify_workers(self, worker_ids, subject, message_text): + """ + Send a text message to workers. + """ + params = {'Subject': subject, + 'MessageText': message_text} + self.build_list_params(params, worker_ids, 'WorkerId') + + return self._process_request('NotifyWorkers', params) + + def create_qualification_type(self, + name, + description, + status, + keywords=None, + retry_delay=None, + test=None, + answer_key=None, + answer_key_xml=None, + test_duration=None, + auto_granted=False, + auto_granted_value=1): + """ + Create a new Qualification Type. + + name: This will be visible to workers and must be unique for a + given requester. + + description: description shown to workers. Max 2000 characters. + + status: 'Active' or 'Inactive' + + keywords: list of keyword strings or comma separated string. + Max length of 1000 characters when concatenated with commas. + + retry_delay: number of seconds after requesting a + qualification the worker must wait before they can ask again. + If not specified, workers can only request this qualification + once. + + test: a QuestionForm + + answer_key: an XML string of your answer key, for automatically + scored qualification tests. + (Consider implementing an AnswerKey class for this to support.) + + test_duration: the number of seconds a worker has to complete the test. + + auto_granted: if True, requests for the Qualification are granted + immediately. Can't coexist with a test. + + auto_granted_value: auto_granted qualifications are given this value. + + """ + + params = {'Name': name, + 'Description': description, + 'QualificationTypeStatus': status, + } + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + assert(test_duration is not None) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class. 
+ + if auto_granted: + assert(test is None) + params['AutoGranted'] = True + params['AutoGrantedValue'] = auto_granted_value + + if keywords: + params['Keywords'] = self.get_keywords_as_string(keywords) + + return self._process_request('CreateQualificationType', params, + [('QualificationType', + QualificationType)]) + + def get_qualification_type(self, qualification_type_id): + params = {'QualificationTypeId': qualification_type_id } + return self._process_request('GetQualificationType', params, + [('QualificationType', QualificationType)]) + + def get_all_qualifications_for_qual_type(self, qualification_type_id): + page_size = 100 + search_qual = self.get_qualifications_for_qualification_type(qualification_type_id) + total_records = int(search_qual.TotalNumResults) + get_page_quals = lambda page: self.get_qualifications_for_qualification_type(qualification_type_id = qualification_type_id, page_size=page_size, page_number = page) + page_nums = self._get_pages(page_size, total_records) + qual_sets = itertools.imap(get_page_quals, page_nums) + return itertools.chain.from_iterable(qual_sets) + + def get_qualifications_for_qualification_type(self, qualification_type_id, page_size=100, page_number = 1): + params = {'QualificationTypeId': qualification_type_id, + 'PageSize': page_size, + 'PageNumber': page_number} + return self._process_request('GetQualificationsForQualificationType', params, + [('Qualification', Qualification)]) + + def update_qualification_type(self, qualification_type_id, + description=None, + status=None, + retry_delay=None, + test=None, + answer_key=None, + test_duration=None, + auto_granted=None, + auto_granted_value=None): + + params = {'QualificationTypeId': qualification_type_id} + + if description is not None: + params['Description'] = description + + if status is not None: + params['QualificationTypeStatus'] = status + + if retry_delay is not None: + params['RetryDelayInSeconds'] = retry_delay + + if test is not None: + assert(isinstance(test, QuestionForm)) + params['Test'] = test.get_as_xml() + + if test_duration is not None: + params['TestDurationInSeconds'] = test_duration + + if answer_key is not None: + if isinstance(answer_key, basestring): + params['AnswerKey'] = answer_key # xml + else: + raise TypeError + # Eventually someone will write an AnswerKey class. 
+
+        if auto_granted is not None:
+            params['AutoGranted'] = auto_granted
+
+        if auto_granted_value is not None:
+            params['AutoGrantedValue'] = auto_granted_value
+
+        return self._process_request('UpdateQualificationType', params,
+                                     [('QualificationType', QualificationType)])
+
+    def dispose_qualification_type(self, qualification_type_id):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id}
+        return self._process_request('DisposeQualificationType', params)
+
+    def search_qualification_types(self, query=None, sort_by='Name',
+                                   sort_direction='Ascending', page_size=10,
+                                   page_number=1, must_be_requestable=True,
+                                   must_be_owned_by_caller=True):
+        """TODO: Document."""
+        params = {'Query': query,
+                  'SortProperty': sort_by,
+                  'SortDirection': sort_direction,
+                  'PageSize': page_size,
+                  'PageNumber': page_number,
+                  'MustBeRequestable': must_be_requestable,
+                  'MustBeOwnedByCaller': must_be_owned_by_caller}
+        return self._process_request('SearchQualificationTypes', params,
+                                     [('QualificationType', QualificationType)])
+
+    def get_qualification_requests(self, qualification_type_id,
+                                   sort_by='Expiration',
+                                   sort_direction='Ascending', page_size=10,
+                                   page_number=1):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SortProperty': sort_by,
+                  'SortDirection': sort_direction,
+                  'PageSize': page_size,
+                  'PageNumber': page_number}
+        return self._process_request('GetQualificationRequests', params,
+                                     [('QualificationRequest', QualificationRequest)])
+
+    def grant_qualification(self, qualification_request_id, integer_value=1):
+        """TODO: Document."""
+        params = {'QualificationRequestId': qualification_request_id,
+                  'IntegerValue': integer_value}
+        return self._process_request('GrantQualification', params)
+
+    def revoke_qualification(self, subject_id, qualification_type_id,
+                             reason=None):
+        """TODO: Document."""
+        params = {'SubjectId': subject_id,
+                  'QualificationTypeId': qualification_type_id,
+                  'Reason': reason}
+        return self._process_request('RevokeQualification', params)
+
+    def assign_qualification(self, qualification_type_id, worker_id,
+                             value=1, send_notification=True):
+        params = {'QualificationTypeId': qualification_type_id,
+                  'WorkerId': worker_id,
+                  'IntegerValue': value,
+                  'SendNotification': send_notification}
+        return self._process_request('AssignQualification', params)
+
+    def get_qualification_score(self, qualification_type_id, worker_id):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SubjectId': worker_id}
+        return self._process_request('GetQualificationScore', params,
+                                     [('Qualification', Qualification)])
+
+    def update_qualification_score(self, qualification_type_id, worker_id,
+                                   value):
+        """TODO: Document."""
+        params = {'QualificationTypeId': qualification_type_id,
+                  'SubjectId': worker_id,
+                  'IntegerValue': value}
+        return self._process_request('UpdateQualificationScore', params)
+
+    def _process_request(self, request_type, params, marker_elems=None):
+        """
+        Helper to process the xml response from AWS
+        """
+        params['Operation'] = request_type
+        response = self.make_request(None, params, verb='POST')
+        return self._process_response(response, marker_elems)
+
+    def _process_response(self, response, marker_elems=None):
+        """
+        Helper to process the xml response from AWS
+        """
+        body = response.read()
+        if self.debug == 2:
+            print(body)
+        if '<Errors>' not in body.decode('utf-8'):
+            rs = ResultSet(marker_elems)
+            h = handler.XmlHandler(rs, self)
+            xml.sax.parseString(body, h)
+            return rs
+        else:
+            raise MTurkRequestError(response.status, response.reason, body)
+
+    @staticmethod
+    def get_keywords_as_string(keywords):
+        """
+        Returns a comma+space-separated string of keywords from either
+        a list or a string
+        """
+        if isinstance(keywords, list):
+            keywords = ', '.join(keywords)
+        if isinstance(keywords, str):
+            final_keywords = keywords
+        elif isinstance(keywords, unicode):
+            final_keywords = keywords.encode('utf-8')
+        elif keywords is None:
+            final_keywords = ""
+        else:
+            raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords))
+        return final_keywords
+
+    @staticmethod
+    def get_price_as_price(reward):
+        """
+        Returns a Price data structure from either a float or a Price
+        """
+        if isinstance(reward, Price):
+            final_price = reward
+        else:
+            final_price = Price(reward)
+        return final_price
+
+    @staticmethod
+    def duration_as_seconds(duration):
+        if isinstance(duration, datetime.timedelta):
+            duration = duration.days * 86400 + duration.seconds
+        try:
+            duration = int(duration)
+        except TypeError:
+            raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration))
+        return duration
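With the connection class complete at this point in the file, a minimal end-to-end sketch may help reviewers. It is not part of the vendored file and makes several assumptions: Medusa's `ext` directory is on `sys.path` (so the package imports as `boto`), the credentials are placeholders, and the sandbox host is used to avoid real charges.

    # Sketch only -- not part of ext/boto. Credentials and literals are
    # placeholders; the sandbox host avoids spending real money.
    from boto.mturk.connection import MTurkConnection
    from boto.mturk.question import ExternalQuestion

    conn = MTurkConnection(aws_access_key_id='...',
                           aws_secret_access_key='...',
                           host='mechanicalturk.sandbox.amazonaws.com')
    question = ExternalQuestion('https://example.com/task', frame_height=600)
    result = conn.create_hit(question=question, title='Tag an image',
                             description='Pick the best label for the image',
                             keywords=['image', 'tagging'], reward=0.05,
                             max_assignments=3)
    hit_id = result[0].HITId  # create_hit returns a ResultSet of HIT objects
    for assignment in conn.get_assignments(hit_id, status='Submitted'):
        conn.approve_assignment(assignment.AssignmentId)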
+
+
+class BaseAutoResultElement(object):
+    """
+    Base class to automatically add attributes when parsing XML
+    """
+    def __init__(self, connection):
+        pass
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+
+class HIT(BaseAutoResultElement):
+    """
+    Class to extract a HIT structure from a response (used in ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. HITId, HITTypeId, CreationTime
+    """
+
+    # property helper to determine if HIT has expired
+    def _has_expired(self):
+        """ Has this HIT expired yet? """
+        expired = False
+        if hasattr(self, 'Expiration'):
+            now = datetime.datetime.utcnow()
+            expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+            expired = (now >= expiration)
+        else:
+            raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
+        return expired
+
+    # are we there yet?
+    expired = property(_has_expired)
+
+
+class FileUploadURL(BaseAutoResultElement):
+    """
+    Class to extract an FileUploadURL structure from a response
+    """
+
+    pass
+
+
+class HITTypeId(BaseAutoResultElement):
+    """
+    Class to extract an HITTypeId structure from a response
+    """
+
+    pass
+
+
+class Qualification(BaseAutoResultElement):
+    """
+    Class to extract an Qualification structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide such as
+    QualificationTypeId, IntegerValue. Does not seem to contain GrantTime.
+    """
+
+    pass
+
+
+class QualificationType(BaseAutoResultElement):
+    """
+    Class to extract an QualificationType structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. QualificationTypeId, CreationTime, Name, etc
+    """
+
+    pass
+
+
+class QualificationRequest(BaseAutoResultElement):
+    """
+    Class to extract an QualificationRequest structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. QualificationRequestId, QualificationTypeId, SubjectId, etc
+    """
+
+    def __init__(self, connection):
+        super(QualificationRequest, self).__init__(connection)
+        self.answers = []
+
+    def endElement(self, name, value, connection):
+        # the answer consists of embedded XML, so it needs to be parsed independently
+        if name == 'Answer':
+            answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
+            h = handler.XmlHandler(answer_rs, connection)
+            value = connection.get_utf8_value(value)
+            xml.sax.parseString(value, h)
+            self.answers.append(answer_rs)
+        else:
+            super(QualificationRequest, self).endElement(name, value, connection)
+
+
+class Assignment(BaseAutoResultElement):
+    """
+    Class to extract an Assignment structure from a response (used in
+    ResultSet)
+
+    Will have attributes named as per the Developer Guide,
+    e.g. AssignmentId, WorkerId, HITId, Answer, etc
+    """
+
+    def __init__(self, connection):
+        super(Assignment, self).__init__(connection)
+        self.answers = []
+
+    def endElement(self, name, value, connection):
+        # the answer consists of embedded XML, so it needs to be parsed independently
+        if name == 'Answer':
+            answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
+            h = handler.XmlHandler(answer_rs, connection)
+            value = connection.get_utf8_value(value)
+            xml.sax.parseString(value, h)
+            self.answers.append(answer_rs)
+        else:
+            super(Assignment, self).endElement(name, value, connection)
+
+
+class QuestionFormAnswer(BaseAutoResultElement):
+    """
+    Class to extract Answers from inside the embedded XML
+    QuestionFormAnswers element inside the Answer element which is
+    part of the Assignment and QualificationRequest structures
+
+    A QuestionFormAnswers element contains an Answer element for each
+    question in the HIT or Qualification test for which the Worker
+    provided an answer. Each Answer contains a QuestionIdentifier
+    element whose value corresponds to the QuestionIdentifier of a
+    Question in the QuestionForm. See the QuestionForm data structure
+    for more information about questions and answer specifications.
+
+    If the question expects a free-text answer, the Answer element
+    contains a FreeText element. This element contains the Worker's
+    answer
+
+    *NOTE* - currently really only supports free-text and selection answers
+    """
+
+    def __init__(self, connection):
+        super(QuestionFormAnswer, self).__init__(connection)
+        self.fields = []
+        self.qid = None
+
+    def endElement(self, name, value, connection):
+        if name == 'QuestionIdentifier':
+            self.qid = value
+        elif name in ['FreeText', 'SelectionIdentifier', 'OtherSelectionText'] and self.qid:
+            self.fields.append(value)
diff --git a/ext/boto/mturk/layoutparam.py b/ext/boto/mturk/layoutparam.py
new file mode 100644
index 0000000000..de7989554e
--- /dev/null
+++ b/ext/boto/mturk/layoutparam.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LayoutParameters(object): + + def __init__(self, layoutParameters=None): + if layoutParameters is None: + layoutParameters = [] + self.layoutParameters = layoutParameters + + def add(self, req): + self.layoutParameters.append(req) + + def get_as_params(self): + params = {} + assert(len(self.layoutParameters) <= 25) + for n, layoutParameter in enumerate(self.layoutParameters): + kv = layoutParameter.get_as_params() + for key in kv: + params['HITLayoutParameter.%s.%s' % ((n+1), key) ] = kv[key] + return params + +class LayoutParameter(object): + """ + Representation of a single HIT layout parameter + """ + + def __init__(self, name, value): + self.name = name + self.value = value + + def get_as_params(self): + params = { + "Name": self.name, + "Value": self.value, + } + return params diff --git a/ext/boto/mturk/notification.py b/ext/boto/mturk/notification.py new file mode 100644 index 0000000000..118daaab95 --- /dev/null +++ b/ext/boto/mturk/notification.py @@ -0,0 +1,103 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Provides NotificationMessage and Event classes, with utility methods, for +implementations of the Mechanical Turk Notification API. 
+""" + +import hmac +try: + from hashlib import sha1 as sha +except ImportError: + import sha +import base64 +import re + +class NotificationMessage(object): + + NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl" + NOTIFICATION_VERSION = '2006-05-05' + + SERVICE_NAME = "AWSMechanicalTurkRequesterNotification" + OPERATION_NAME = "Notify" + + EVENT_PATTERN = r"Event\.(?P\d+)\.(?P\w+)" + EVENT_RE = re.compile(EVENT_PATTERN) + + def __init__(self, d): + """ + Constructor; expects parameter d to be a dict of string parameters from a REST transport notification message + """ + self.signature = d['Signature'] # vH6ZbE0NhkF/hfNyxz2OgmzXYKs= + self.timestamp = d['Timestamp'] # 2006-05-23T23:22:30Z + self.version = d['Version'] # 2006-05-05 + assert d['method'] == NotificationMessage.OPERATION_NAME, "Method should be '%s'" % NotificationMessage.OPERATION_NAME + + # Build Events + self.events = [] + events_dict = {} + if 'Event' in d: + # TurboGears surprised me by 'doing the right thing' and making { 'Event': { '1': { 'EventType': ... } } } etc. + events_dict = d['Event'] + else: + for k in d: + v = d[k] + if k.startswith('Event.'): + ed = NotificationMessage.EVENT_RE.search(k).groupdict() + n = int(ed['n']) + param = str(ed['param']) + if n not in events_dict: + events_dict[n] = {} + events_dict[n][param] = v + for n in events_dict: + self.events.append(Event(events_dict[n])) + + def verify(self, secret_key): + """ + Verifies the authenticity of a notification message. + + TODO: This is doing a form of authentication and + this functionality should really be merged + with the pluggable authentication mechanism + at some point. + """ + verification_input = NotificationMessage.SERVICE_NAME + verification_input += NotificationMessage.OPERATION_NAME + verification_input += self.timestamp + h = hmac.new(key=secret_key, digestmod=sha) + h.update(verification_input) + signature_calc = base64.b64encode(h.digest()) + return self.signature == signature_calc + +class Event(object): + def __init__(self, d): + self.event_type = d['EventType'] + self.event_time_str = d['EventTime'] + self.hit_type = d['HITTypeId'] + self.hit_id = d['HITId'] + if 'AssignmentId' in d: # Not present in all event types + self.assignment_id = d['AssignmentId'] + + #TODO: build self.event_time datetime from string self.event_time_str + + def __repr__(self): + return "" % (self.event_type, self.hit_id) diff --git a/ext/boto/mturk/price.py b/ext/boto/mturk/price.py new file mode 100644 index 0000000000..8e194e422e --- /dev/null +++ b/ext/boto/mturk/price.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Price(object): + + def __init__(self, amount=0.0, currency_code='USD'): + self.amount = amount + self.currency_code = currency_code + self.formatted_price = '' + + def __repr__(self): + if self.formatted_price: + return self.formatted_price + else: + return str(self.amount) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Amount': + self.amount = float(value) + elif name == 'CurrencyCode': + self.currency_code = value + elif name == 'FormattedPrice': + self.formatted_price = value + + def get_as_params(self, label, ord=1): + return {'%s.%d.Amount'%(label, ord) : str(self.amount), + '%s.%d.CurrencyCode'%(label, ord) : self.currency_code} diff --git a/ext/boto/mturk/qualification.py b/ext/boto/mturk/qualification.py new file mode 100644 index 0000000000..961f19f9b8 --- /dev/null +++ b/ext/boto/mturk/qualification.py @@ -0,0 +1,157 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
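The Price class in price.py above serializes itself into the flattened request parameters the connection submits; a quick sketch of that, not part of the vendored file (the label string is the caller's choice and the ordinal defaults to 1):

    # Sketch only -- not part of ext/boto.
    from boto.mturk.price import Price

    reward = Price(amount=0.25)            # currency_code defaults to 'USD'
    print(reward.get_as_params('Reward'))
    # -> {'Reward.1.Amount': '0.25', 'Reward.1.CurrencyCode': 'USD'}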
+ +class Qualifications(object): + + def __init__(self, requirements=None): + if requirements is None: + requirements = [] + self.requirements = requirements + + def add(self, req): + self.requirements.append(req) + + def get_as_params(self): + params = {} + assert(len(self.requirements) <= 10) + for n, req in enumerate(self.requirements): + reqparams = req.get_as_params() + for rp in reqparams: + params['QualificationRequirement.%s.%s' % ((n+1), rp) ] = reqparams[rp] + return params + + +class Requirement(object): + """ + Representation of a single requirement + """ + + def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False): + self.qualification_type_id = qualification_type_id + self.comparator = comparator + self.integer_value = integer_value + self.required_to_preview = required_to_preview + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + } + if self.comparator in ('In', 'NotIn'): + for i, integer_value in enumerate(self.integer_value, 1): + params['IntegerValue.%d' % i] = integer_value + elif self.comparator not in ('Exists', 'DoesNotExist') and self.integer_value is not None: + params['IntegerValue'] = self.integer_value + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class PercentAssignmentsSubmittedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsSubmittedRequirement, self).__init__(qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsAbandonedRequirement(Requirement): + """ + The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsAbandonedRequirement, self).__init__(qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsReturnedRequirement(Requirement): + """ + The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsReturnedRequirement, self).__init__(qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsApprovedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. 
+ """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsApprovedRequirement, self).__init__(qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsRejectedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(PercentAssignmentsRejectedRequirement, self).__init__(qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class NumberHitsApprovedRequirement(Requirement): + """ + Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0. + + If specifying a Country and Subdivision, use a tuple of valid ISO 3166 country code and ISO 3166-2 subdivision code, e.g. ('US', 'CA') for the US State of California. + + When using the 'In' and 'NotIn', locale should be a list of Countries and/or (Country, Subdivision) tuples. + + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(NumberHitsApprovedRequirement, self).__init__(qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class LocaleRequirement(Requirement): + """ + A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account. + """ + + def __init__(self, comparator, locale, required_to_preview=False): + super(LocaleRequirement, self).__init__(qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) + self.locale = locale + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + } + if self.comparator in ('In', 'NotIn'): + for i, locale in enumerate(self.locale, 1): + if isinstance(locale, tuple): + params['LocaleValue.%d.Country' % i] = locale[0] + params['LocaleValue.%d.Subdivision' % i] = locale[1] + else: + params['LocaleValue.%d.Country' % i] = locale + else: + if isinstance(self.locale, tuple): + params['LocaleValue.Country'] = self.locale[0] + params['LocaleValue.Subdivision'] = self.locale[1] + else: + params['LocaleValue.Country'] = self.locale + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class AdultRequirement(Requirement): + """ + Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default). 
+ """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + super(AdultRequirement, self).__init__(qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) diff --git a/ext/boto/mturk/question.py b/ext/boto/mturk/question.py new file mode 100644 index 0000000000..293b0782ed --- /dev/null +++ b/ext/boto/mturk/question.py @@ -0,0 +1,455 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import xml.sax.saxutils + +class Question(object): + template = "%(items)s" + + def __init__(self, identifier, content, answer_spec, + is_required=False, display_name=None): + # copy all of the parameters into object attributes + self.__dict__.update(vars()) + del self.self + + def get_as_params(self, label='Question'): + return {label: self.get_as_xml()} + + def get_as_xml(self): + items = [ + SimpleField('QuestionIdentifier', self.identifier), + SimpleField('IsRequired', str(self.is_required).lower()), + self.content, + self.answer_spec, + ] + if self.display_name is not None: + items.insert(1, SimpleField('DisplayName', self.display_name)) + items = ''.join(item.get_as_xml() for item in items) + return self.template % vars() + +try: + from lxml import etree + + class ValidatingXML(object): + + def validate(self): + import urllib2 + schema_src_file = urllib2.urlopen(self.schema_url) + schema_doc = etree.parse(schema_src_file) + schema = etree.XMLSchema(schema_doc) + doc = etree.fromstring(self.get_as_xml()) + schema.assertValid(doc) +except ImportError: + class ValidatingXML(object): + + def validate(self): + pass + + +class ExternalQuestion(ValidatingXML): + """ + An object for constructing an External Question. + """ + schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd" + template = '%%(external_url)s%%(frame_height)s' % vars() + + def __init__(self, external_url, frame_height): + self.external_url = xml.sax.saxutils.escape( external_url ) + self.frame_height = frame_height + + def get_as_params(self, label='ExternalQuestion'): + return {label: self.get_as_xml()} + + def get_as_xml(self): + return self.template % vars(self) + + +class XMLTemplate(object): + def get_as_xml(self): + return self.template % vars(self) + + +class SimpleField(XMLTemplate): + """ + A Simple name/value pair that can be easily rendered as XML. 
+
+
+class XMLTemplate(object):
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class SimpleField(XMLTemplate):
+    """
+    A Simple name/value pair that can be easily rendered as XML.
+
+    >>> SimpleField('Text', 'A text string').get_as_xml()
+    '<Text>A text string</Text>'
+    """
+    template = '<%(field)s>%(value)s</%(field)s>'
+
+    def __init__(self, field, value):
+        self.field = field
+        self.value = value
+
+
+class Binary(XMLTemplate):
+    template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
+
+    def __init__(self, type, subtype, url, alt_text):
+        self.__dict__.update(vars())
+        del self.self
+
+
+class List(list):
+    """A bulleted list suitable for OrderedContent or Overview content"""
+    def get_as_xml(self):
+        items = ''.join('<ListItem>%s</ListItem>' % item for item in self)
+        return '<List>%s</List>' % items
+
+
+class Application(object):
+    template = "<Application><%(class_)s>%(content)s</%(class_)s></Application>"
+    parameter_template = "<Name>%(name)s</Name><Value>%(value)s</Value>"
+
+    def __init__(self, width, height, **parameters):
+        self.width = width
+        self.height = height
+        self.parameters = parameters
+
+    def get_inner_content(self, content):
+        content.append_field('Width', self.width)
+        content.append_field('Height', self.height)
+        for name, value in self.parameters.items():
+            value = self.parameter_template % vars()
+            content.append_field('ApplicationParameter', value)
+
+    def get_as_xml(self):
+        content = OrderedContent()
+        self.get_inner_content(content)
+        content = content.get_as_xml()
+        class_ = self.__class__.__name__
+        return self.template % vars()
+
+
+class HTMLQuestion(ValidatingXML):
+    schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd'
+    template = '<HTMLQuestion xmlns="%(schema_url)s"><HTMLContent><![CDATA[<!DOCTYPE html>%%(html_form)s]]></HTMLContent><FrameHeight>%%(frame_height)s</FrameHeight></HTMLQuestion>' % vars()
+
+    def __init__(self, html_form, frame_height):
+        self.html_form = html_form
+        self.frame_height = frame_height
+
+    def get_as_params(self, label="HTMLQuestion"):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        return self.template % vars(self)
+
+
+class JavaApplet(Application):
+    def __init__(self, path, filename, *args, **kwargs):
+        self.path = path
+        self.filename = filename
+        super(JavaApplet, self).__init__(*args, **kwargs)
+
+    def get_inner_content(self, content):
+        content = OrderedContent()
+        content.append_field('AppletPath', self.path)
+        content.append_field('AppletFilename', self.filename)
+        super(JavaApplet, self).get_inner_content(content)
+
+
+class Flash(Application):
+    def __init__(self, url, *args, **kwargs):
+        self.url = url
+        super(Flash, self).__init__(*args, **kwargs)
+
+    def get_inner_content(self, content):
+        content = OrderedContent()
+        content.append_field('FlashMovieURL', self.url)
+        super(Flash, self).get_inner_content(content)
+
+
+class FormattedContent(XMLTemplate):
+    schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd'
+    template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>'
+
+    def __init__(self, content):
+        self.content = content
+
+
+class OrderedContent(list):
+
+    def append_field(self, field, value):
+        self.append(SimpleField(field, value))
+
+    def get_as_xml(self):
+        return ''.join(item.get_as_xml() for item in self)
+
+
+class Overview(OrderedContent):
+    template = '<Overview>%(content)s</Overview>'
+
+    def get_as_params(self, label='Overview'):
+        return {label: self.get_as_xml()}
+
+    def get_as_xml(self):
+        content = super(Overview, self).get_as_xml()
+        return self.template % vars()
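A brief sketch of composing an Overview from the content helpers above; not part of the vendored file, and the title and markup are placeholders:

    # Sketch only -- not part of ext/boto.
    from boto.mturk.question import FormattedContent, Overview

    overview = Overview()
    overview.append_field('Title', 'Instructions')
    overview.append(FormattedContent('<p>Answer every question below.</p>'))
    print(overview.get_as_xml())
    # <Overview><Title>Instructions</Title>
    # <FormattedContent><![CDATA[<p>Answer every question below.</p>]]>
    # </FormattedContent></Overview>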
The + following example structure has an Overview element and a + Question element followed by a second Overview element and + Question element--all within the same QuestionForm. + + :: + + + + [...] + + + [...] + + + [...] + + + [...] + + [...] + + + QuestionForm is implemented as a list, so to construct a + QuestionForm, simply append Questions and Overviews (with at least + one Question). + """ + schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd" + xml_template = """%%(items)s""" % vars() + + def is_valid(self): + return ( + any(isinstance(item, Question) for item in self) + and + all(isinstance(item, (Question, Overview)) for item in self) + ) + + def get_as_xml(self): + assert self.is_valid(), "QuestionForm contains invalid elements" + items = ''.join(item.get_as_xml() for item in self) + return self.xml_template % vars() + + +class QuestionContent(OrderedContent): + template = '%(content)s' + + def get_as_xml(self): + content = super(QuestionContent, self).get_as_xml() + return self.template % vars() + + +class AnswerSpecification(object): + template = '%(spec)s' + + def __init__(self, spec): + self.spec = spec + + def get_as_xml(self): + spec = self.spec.get_as_xml() + return self.template % vars() + + +class Constraints(OrderedContent): + template = '%(content)s' + + def get_as_xml(self): + content = super(Constraints, self).get_as_xml() + return self.template % vars() + + +class Constraint(object): + def get_attributes(self): + pairs = zip(self.attribute_names, self.attribute_values) + attrs = ' '.join( + '%s="%d"' % (name, value) + for (name, value) in pairs + if value is not None + ) + return attrs + + def get_as_xml(self): + attrs = self.get_attributes() + return self.template % vars() + + +class NumericConstraint(Constraint): + attribute_names = 'minValue', 'maxValue' + template = '' + + def __init__(self, min_value=None, max_value=None): + self.attribute_values = min_value, max_value + + +class LengthConstraint(Constraint): + attribute_names = 'minLength', 'maxLength' + template = '' + + def __init__(self, min_length=None, max_length=None): + self.attribute_values = min_length, max_length + + +class RegExConstraint(Constraint): + attribute_names = 'regex', 'errorText', 'flags' + template = '' + + def __init__(self, pattern, error_text=None, flags=None): + self.attribute_values = pattern, error_text, flags + + def get_attributes(self): + pairs = zip(self.attribute_names, self.attribute_values) + attrs = ' '.join( + '%s="%s"' % (name, value) + for (name, value) in pairs + if value is not None + ) + return attrs + + +class NumberOfLinesSuggestion(object): + template = '%(num_lines)s' + + def __init__(self, num_lines=1): + self.num_lines = num_lines + + def get_as_xml(self): + num_lines = self.num_lines + return self.template % vars() + + +class FreeTextAnswer(object): + template = '%(items)s' + + def __init__(self, default=None, constraints=None, num_lines=None): + self.default = default + if constraints is None: + self.constraints = Constraints() + else: + self.constraints = Constraints(constraints) + self.num_lines = num_lines + + def get_as_xml(self): + items = [self.constraints] + if self.default: + items.append(SimpleField('DefaultText', self.default)) + if self.num_lines: + items.append(NumberOfLinesSuggestion(self.num_lines)) + items = ''.join(item.get_as_xml() for item in items) + return self.template % vars() + + +class FileUploadAnswer(object): + template = """%(max_bytes)d%(min_bytes)d""" + + def __init__(self, 
min_bytes, max_bytes): + assert 0 <= min_bytes <= max_bytes <= 2 * 10 ** 9 + self.min_bytes = min_bytes + self.max_bytes = max_bytes + + def get_as_xml(self): + return self.template % vars(self) + + +class SelectionAnswer(object): + """ + A class to generate SelectionAnswer XML data structures. + Does not yet implement Binary selection options. + """ + SELECTIONANSWER_XML_TEMPLATE = """%s%s%s""" # % (count_xml, style_xml, selections_xml) + SELECTION_XML_TEMPLATE = """%s%s""" # (identifier, value_xml) + SELECTION_VALUE_XML_TEMPLATE = """<%s>%s""" # (type, value, type) + STYLE_XML_TEMPLATE = """%s""" # (style) + MIN_SELECTION_COUNT_XML_TEMPLATE = """%s""" # count + MAX_SELECTION_COUNT_XML_TEMPLATE = """%s""" # count + ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser'] + OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection' + + def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False): + + if style is not None: + if style in SelectionAnswer.ACCEPTED_STYLES: + self.style_suggestion = style + else: + raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES))) + else: + self.style_suggestion = None + + if selections is None: + raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples") + else: + self.selections = selections + + self.min_selections = min + self.max_selections = max + + assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections + #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections + + self.type = type + + self.other = other + + def get_as_xml(self): + if self.type == 'text': + TYPE_TAG = "Text" + elif self.type == 'binary': + TYPE_TAG = "Binary" + else: + raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type)) + + # build list of elements + selections_xml = "" + for tpl in self.selections: + value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG) + selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml) + selections_xml += selection_xml + + if self.other: + # add OtherSelection element as xml if available + if hasattr(self.other, 'get_as_xml'): + assert isinstance(self.other, FreeTextAnswer), 'OtherSelection can only be a FreeTextAnswer' + selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection') + else: + selections_xml += "" + + if self.style_suggestion is not None: + style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion + else: + style_xml = "" + + if self.style_suggestion != 'radiobutton': + count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE %self.min_selections + count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE %self.max_selections + else: + count_xml = "" + + ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml) + + # return XML + return ret diff --git a/ext/boto/mws/__init__.py b/ext/boto/mws/__init__.py new file mode 100644 index 0000000000..d69b7f08a4 --- /dev/null +++ b/ext/boto/mws/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2008, Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without 
limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# diff --git a/ext/boto/mws/connection.py b/ext/boto/mws/connection.py new file mode 100644 index 0000000000..687fae74f0 --- /dev/null +++ b/ext/boto/mws/connection.py @@ -0,0 +1,1168 @@ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
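+
+# A sketch of the calling convention this module builds up below; the
+# seller id, call, and argument values are illustrative, not defaults:
+#
+#     conn = MWSConnection(aws_access_key_id='...',
+#                          aws_secret_access_key='...',
+#                          Merchant='SELLER_ID')
+#     response = conn.list_orders(MarketplaceId=['ATVPDKIKX0DER'],
+#                                 CreatedAfter='2017-12-01T00:00:00Z')
+#     for page in conn.iter_response(response):
+#         pass  # each page wraps one ListOrders/ListOrdersByNextToken result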
+import xml.sax +import hashlib +import string +import collections +from boto.connection import AWSQueryConnection +from boto.exception import BotoServerError +import boto.mws.exception +import boto.mws.response +from boto.handler import XmlHandler +from boto.compat import filter, map, six, encodebytes + +__all__ = ['MWSConnection'] + +api_version_path = { + 'Feeds': ('2009-01-01', 'Merchant', '/'), + 'Reports': ('2009-01-01', 'Merchant', '/'), + 'Orders': ('2013-09-01', 'SellerId', '/Orders/2013-09-01'), + 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), + 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), + 'Inbound': ('2010-10-01', 'SellerId', + '/FulfillmentInboundShipment/2010-10-01'), + 'Outbound': ('2010-10-01', 'SellerId', + '/FulfillmentOutboundShipment/2010-10-01'), + 'Inventory': ('2010-10-01', 'SellerId', + '/FulfillmentInventory/2010-10-01'), + 'Recommendations': ('2013-04-01', 'SellerId', + '/Recommendations/2013-04-01'), + 'CustomerInfo': ('2014-03-01', 'SellerId', + '/CustomerInformation/2014-03-01'), + 'CartInfo': ('2014-03-01', 'SellerId', + '/CartInformation/2014-03-01'), + 'Subscriptions': ('2013-07-01', 'SellerId', + '/Subscriptions/2013-07-01'), + 'OffAmazonPayments': ('2013-01-01', 'SellerId', + '/OffAmazonPayments/2013-01-01'), +} +content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip() +decorated_attrs = ('action', 'response', 'section', + 'quota', 'restore', 'version') +api_call_map = {} + + +def add_attrs_from(func, to): + for attr in decorated_attrs: + setattr(to, attr, getattr(func, attr, None)) + to.__wrapped__ = func + return to + + +def structured_lists(*fields): + + def decorator(func): + + def wrapper(self, *args, **kw): + for key, acc in [f.split('.') for f in fields]: + if key in kw: + newkey = key + '.' + acc + (acc and '.' or '') + for i in range(len(kw[key])): + kw[newkey + str(i + 1)] = kw[key][i] + kw.pop(key) + return func(self, *args, **kw) + wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def http_body(field): + + def decorator(func): + + def wrapper(*args, **kw): + if any([f not in kw for f in (field, 'content_type')]): + message = "{0} requires {1} and content_type arguments for " \ + "building HTTP body".format(func.action, field) + raise KeyError(message) + kw['body'] = kw.pop(field) + kw['headers'] = { + 'Content-Type': kw.pop('content_type'), + 'Content-MD5': content_md5(kw['body']), + } + return func(*args, **kw) + wrapper.__doc__ = "{0}\nRequired HTTP Body: " \ + "{1}".format(func.__doc__, field) + return add_attrs_from(func, to=wrapper) + return decorator + + +def destructure_object(value, into, prefix, members=False): + if isinstance(value, boto.mws.response.ResponseElement): + destructure_object(value.__dict__, into, prefix, members=members) + elif isinstance(value, collections.Mapping): + for name in value: + if name.startswith('_'): + continue + destructure_object(value[name], into, prefix + '.' + name, + members=members) + elif isinstance(value, six.string_types): + into[prefix] = value + elif isinstance(value, collections.Iterable): + for index, element in enumerate(value): + suffix = (members and '.member.' 
or '.') + str(index + 1) + destructure_object(element, into, prefix + suffix, + members=members) + elif isinstance(value, bool): + into[prefix] = str(value).lower() + else: + into[prefix] = value + + +def structured_objects(*fields, **kwargs): + + def decorator(func): + + def wrapper(*args, **kw): + members = kwargs.get('members', False) + for field in filter(lambda i: i in kw, fields): + destructure_object(kw.pop(field), kw, field, members=members) + return func(*args, **kw) + wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \ + "(ResponseElement or anything iterable/dict-like)" \ + .format(func.__doc__, ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires(*groups): + + def decorator(func): + + def requires(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if 1 != len(list(filter(hasgroup, groups))): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires {1} argument(s)" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + requires.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=requires) + return decorator + + +def exclusive(*groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if len(list(filter(hasgroup, groups))) not in (0, 1): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} requires either {1}" \ + "".format(func.action, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def dependent(field, *groups): + + def decorator(func): + + def wrapper(*args, **kw): + hasgroup = lambda group: all(key in kw for key in group) + if field in kw and not any(hasgroup(g) for g in groups): + message = ' OR '.join(['+'.join(g) for g in groups]) + message = "{0} argument {1} requires {2}" \ + "".format(func.action, field, message) + raise KeyError(message) + return func(*args, **kw) + message = ' OR '.join(['+'.join(g) for g in groups]) + wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__, + field, + message) + return add_attrs_from(func, to=wrapper) + return decorator + + +def requires_some_of(*fields): + + def decorator(func): + + def requires(*args, **kw): + if not any(i in kw for i in fields): + message = "{0} requires at least one of {1} argument(s)" \ + "".format(func.action, ', '.join(fields)) + raise KeyError(message) + return func(*args, **kw) + requires.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=requires) + return decorator + + +def boolean_arguments(*fields): + + def decorator(func): + + def wrapper(*args, **kw): + for field in [f for f in fields if isinstance(kw.get(f), bool)]: + kw[field] = str(kw[field]).lower() + return func(*args, **kw) + wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, + ', '.join(fields)) + return add_attrs_from(func, to=wrapper) + return decorator + + +def api_action(section, quota, restore, *api): + + def decorator(func, quota=int(quota), restore=float(restore)): + version, accesskey, path = api_version_path[section] + action = ''.join(api or map(str.capitalize, func.__name__.split('_'))) + + def wrapper(self, *args, **kw): + kw.setdefault(accesskey, 
getattr(self, accesskey, None)) + if kw[accesskey] is None: + message = "{0} requires {1} argument. Set the " \ + "MWSConnection.{2} attribute?" \ + "".format(action, accesskey, accesskey) + raise KeyError(message) + kw['Action'] = action + kw['Version'] = version + response = self._response_factory(action, connection=self) + request = dict(path=path, quota=quota, restore=restore) + return func(self, request, response, *args, **kw) + for attr in decorated_attrs: + setattr(wrapper, attr, locals().get(attr)) + wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ + "{4}".format(action, version, quota, restore, + func.__doc__) + api_call_map[action] = func.__name__ + return wrapper + return decorator + + +class MWSConnection(AWSQueryConnection): + + ResponseFactory = boto.mws.response.ResponseFactory + ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory + + def __init__(self, *args, **kw): + kw.setdefault('host', 'mws.amazonservices.com') + self._sandboxed = kw.pop('sandbox', False) + self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId') + self.SellerId = kw.pop('SellerId', None) or self.Merchant + kw = self._setup_factories(kw.pop('factory_scopes', []), **kw) + super(MWSConnection, self).__init__(*args, **kw) + + def _setup_factories(self, extrascopes, **kw): + for factory, (scope, Default) in { + 'response_factory': + (boto.mws.response, self.ResponseFactory), + 'response_error_factory': + (boto.mws.exception, self.ResponseErrorFactory), + }.items(): + if factory in kw: + setattr(self, '_' + factory, kw.pop(factory)) + else: + scopes = extrascopes + [scope] + setattr(self, '_' + factory, Default(scopes=scopes)) + return kw + + def _sandboxify(self, path): + if not self._sandboxed: + return path + splat = path.split('/') + splat[-2] += '_Sandbox' + return '/'.join(splat) + + def _required_auth_capability(self): + return ['mws'] + + def _post_request(self, request, params, parser, body='', headers=None): + """Make a POST request, optionally with a content body, + and return the response, optionally as raw text. + """ + headers = headers or {} + path = self._sandboxify(request['path']) + request = self.build_base_http_request('POST', path, None, data=body, + params=params, headers=headers, + host=self.host) + try: + response = self._mexe(request, override_num_retries=None) + except BotoServerError as bs: + raise self._response_error_factory(bs.status, bs.reason, bs.body) + body = response.read() + boto.log.debug(body) + if not body: + boto.log.error('Null body %s' % body) + raise self._response_error_factory(response.status, + response.reason, body) + if response.status != 200: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self._response_error_factory(response.status, + response.reason, body) + digest = response.getheader('Content-MD5') + if digest is not None: + assert content_md5(body) == digest + contenttype = response.getheader('Content-Type') + return self._parse_response(parser, contenttype, body) + + def _parse_response(self, parser, contenttype, body): + if not contenttype.startswith('text/xml'): + return body + handler = XmlHandler(parser, self) + xml.sax.parseString(body, handler) + return parser + + def method_for(self, name): + """Return the MWS API method referred to in the argument. + The named method can be in CamelCase or underlined_lower_case. 
+ This is the complement to MWSConnection.any_call.action + """ + action = '_' in name and string.capwords(name, '_') or name + if action in api_call_map: + return getattr(self, api_call_map[action]) + return None + + def iter_call(self, call, *args, **kw): + """Pass a call name as the first argument and a generator + is returned for the initial response and any continuation + call responses made using the NextToken. + """ + method = self.method_for(call) + assert method, 'No call named "{0}"'.format(call) + return self.iter_response(method(*args, **kw)) + + def iter_response(self, response): + """Pass a call's response as the initial argument and a + generator is returned for the initial response and any + continuation call responses made using the NextToken. + """ + yield response + more = self.method_for(response._action + 'ByNextToken') + while more and response._result.HasNext == 'true': + response = more(NextToken=response._result.NextToken) + yield response + + @requires(['FeedType']) + @boolean_arguments('PurgeAndReplace') + @http_body('FeedContent') + @structured_lists('MarketplaceIdList.Id') + @api_action('Feeds', 15, 120) + def submit_feed(self, request, response, headers=None, body='', **kw): + """Uploads a feed for processing by Amazon MWS. + """ + headers = headers or {} + return self._post_request(request, kw, response, body=body, + headers=headers) + + @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', + 'FeedProcessingStatusList.Status') + @api_action('Feeds', 10, 45) + def get_feed_submission_list(self, request, response, **kw): + """Returns a list of all feed submissions submitted in the + previous 90 days. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Feeds', 0, 0) + def get_feed_submission_list_by_next_token(self, request, response, **kw): + """Returns a list of feed submissions using the NextToken parameter. + """ + return self._post_request(request, kw, response) + + @structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status') + @api_action('Feeds', 10, 45) + def get_feed_submission_count(self, request, response, **kw): + """Returns a count of the feeds submitted in the previous 90 days. + """ + return self._post_request(request, kw, response) + + @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type') + @api_action('Feeds', 10, 45) + def cancel_feed_submissions(self, request, response, **kw): + """Cancels one or more feed submissions and returns a + count of the feed submissions that were canceled. + """ + return self._post_request(request, kw, response) + + @requires(['FeedSubmissionId']) + @api_action('Feeds', 15, 60) + def get_feed_submission_result(self, request, response, **kw): + """Returns the feed processing report. + """ + return self._post_request(request, kw, response) + + def get_service_status(self, **kw): + """Instruct the user on how to get service status. + """ + sections = ', '.join(map(str.lower, api_version_path.keys())) + message = "Use {0}.get_(section)_service_status(), " \ + "where (section) is one of the following: " \ + "{1}".format(self.__class__.__name__, sections) + raise AttributeError(message) + + @requires(['ReportType']) + @structured_lists('MarketplaceIdList.Id') + @boolean_arguments('ReportOptions=ShowSalesChannel') + @api_action('Reports', 15, 60) + def request_report(self, request, response, **kw): + """Creates a report request and submits the request to Amazon MWS. 
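+
+        Usage sketch (the report type shown is illustrative):
+
+            response = conn.request_report(
+                ReportType='_GET_FLAT_FILE_OPEN_LISTINGS_DATA_')
+            info = response._result.ReportRequestInfo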
+ """ + return self._post_request(request, kw, response) + + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_list(self, request, response, **kw): + """Returns a list of report requests that you can use to get the + ReportRequestId for a report. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_request_list_by_next_token(self, request, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportRequestListByNextToken or GetReportRequestList, where + the value of HasNext was true in that previous request. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type', + 'ReportProcessingStatusList.Status') + @api_action('Reports', 10, 45) + def get_report_request_count(self, request, response, **kw): + """Returns a count of report requests that have been submitted + to Amazon MWS for processing. + """ + return self._post_request(request, kw, response) + + @api_action('Reports', 10, 45) + def cancel_report_requests(self, request, response, **kw): + """Cancel one or more report requests, returning the count of the + canceled report requests and the report request information. + """ + return self._post_request(request, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type') + @api_action('Reports', 10, 60) + def get_report_list(self, request, response, **kw): + """Returns a list of reports that were created in the previous + 90 days that match the query parameters. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_list_by_next_token(self, request, response, **kw): + """Returns a list of reports using the NextToken, which + was supplied by a previous request to either + GetReportListByNextToken or GetReportList, where the + value of HasNext was true in the previous call. + """ + return self._post_request(request, kw, response) + + @boolean_arguments('Acknowledged') + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_count(self, request, response, **kw): + """Returns a count of the reports, created in the previous 90 days, + with a status of _DONE_ and that are available for download. + """ + return self._post_request(request, kw, response) + + @requires(['ReportId']) + @api_action('Reports', 15, 60) + def get_report(self, request, response, **kw): + """Returns the contents of a report. + """ + return self._post_request(request, kw, response) + + @requires(['ReportType', 'Schedule']) + @api_action('Reports', 10, 45) + def manage_report_schedule(self, request, response, **kw): + """Creates, updates, or deletes a report request schedule for + a specified report type. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_list(self, request, response, **kw): + """Returns a list of order report requests that are scheduled + to be submitted to Amazon MWS for processing. 
+ """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Reports', 0, 0) + def get_report_schedule_list_by_next_token(self, request, response, **kw): + """Returns a list of report requests using the NextToken, + which was supplied by a previous request to either + GetReportScheduleListByNextToken or GetReportScheduleList, + where the value of HasNext was true in that previous request. + """ + return self._post_request(request, kw, response) + + @structured_lists('ReportTypeList.Type') + @api_action('Reports', 10, 45) + def get_report_schedule_count(self, request, response, **kw): + """Returns a count of order report requests that are scheduled + to be submitted to Amazon MWS. + """ + return self._post_request(request, kw, response) + + @requires(['ReportIdList']) + @boolean_arguments('Acknowledged') + @structured_lists('ReportIdList.Id') + @api_action('Reports', 10, 45) + def update_report_acknowledgements(self, request, response, **kw): + """Updates the acknowledged status of one or more reports. + """ + return self._post_request(request, kw, response) + + @requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems']) + @structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment_plan(self, request, response, **kw): + """Returns the information required to create an inbound shipment. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def create_inbound_shipment(self, request, response, **kw): + """Creates an inbound shipment. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId']) + @structured_objects('InboundShipmentHeader', 'InboundShipmentItems') + @api_action('Inbound', 30, 0.5) + def update_inbound_shipment(self, request, response, **kw): + """Updates an existing inbound shipment. Amazon documentation + is ambiguous as to whether the InboundShipmentHeader and + InboundShipmentItems arguments are required. + """ + return self._post_request(request, kw, response) + + @requires_some_of('ShipmentIdList', 'ShipmentStatusList') + @structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status') + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments(self, request, response, **kw): + """Returns a list of inbound shipments based on criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipments_by_next_token(self, request, response, **kw): + """Returns the next page of inbound shipments using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items(self, request, response, **kw): + """Returns a list of items in a specified inbound shipment, or a + list of items that were updated within a specified time frame. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Inbound', 30, 0.5) + def list_inbound_shipment_items_by_next_token(self, request, response, **kw): + """Returns the next page of inbound shipment items using the + NextToken parameter. 
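+
+        Continuation is normally driven through iter_call, which follows
+        the NextToken automatically (the ShipmentId is illustrative):
+
+            for page in conn.iter_call('ListInboundShipmentItems',
+                                       ShipmentId='FBA1XYZ'):
+                items = page._result.ItemData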
+ """ + return self._post_request(request, kw, response) + + @api_action('Inbound', 2, 300, 'GetServiceStatus') + def get_inbound_service_status(self, request, response, **kw): + """Returns the operational status of the Fulfillment Inbound + Shipment API section. + """ + return self._post_request(request, kw, response) + + @requires(['SellerSkus'], ['QueryStartDateTime']) + @structured_lists('SellerSkus.member') + @api_action('Inventory', 30, 0.5) + def list_inventory_supply(self, request, response, **kw): + """Returns information about the availability of a seller's + inventory. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Inventory', 30, 0.5) + def list_inventory_supply_by_next_token(self, request, response, **kw): + """Returns the next page of information about the availability + of a seller's inventory using the NextToken parameter. + """ + return self._post_request(request, kw, response) + + @api_action('Inventory', 2, 300, 'GetServiceStatus') + def get_inventory_service_status(self, request, response, **kw): + """Returns the operational status of the Fulfillment Inventory + API section. + """ + return self._post_request(request, kw, response) + + @requires(['PackageNumber']) + @api_action('Outbound', 30, 0.5) + def get_package_tracking_details(self, request, response, **kw): + """Returns delivery tracking information for a package in + an outbound shipment for a Multi-Channel Fulfillment order. + """ + return self._post_request(request, kw, response) + + @requires(['Address', 'Items']) + @structured_objects('Address', 'Items') + @api_action('Outbound', 30, 0.5) + def get_fulfillment_preview(self, request, response, **kw): + """Returns a list of fulfillment order previews based on items + and shipping speed categories that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['SellerFulfillmentOrderId', 'DisplayableOrderId', + 'ShippingSpeedCategory', 'DisplayableOrderDateTime', + 'DestinationAddress', 'DisplayableOrderComment', + 'Items']) + @structured_objects('DestinationAddress', 'Items') + @api_action('Outbound', 30, 0.5) + def create_fulfillment_order(self, request, response, **kw): + """Requests that Amazon ship items from the seller's inventory + to a destination address. + """ + return self._post_request(request, kw, response) + + @requires(['SellerFulfillmentOrderId']) + @api_action('Outbound', 30, 0.5) + def get_fulfillment_order(self, request, response, **kw): + """Returns a fulfillment order based on a specified + SellerFulfillmentOrderId. + """ + return self._post_request(request, kw, response) + + @api_action('Outbound', 30, 0.5) + def list_all_fulfillment_orders(self, request, response, **kw): + """Returns a list of fulfillment orders fulfilled after (or + at) a specified date or by fulfillment method. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Outbound', 30, 0.5) + def list_all_fulfillment_orders_by_next_token(self, request, response, **kw): + """Returns the next page of inbound shipment items using the + NextToken parameter. + """ + return self._post_request(request, kw, response) + + @requires(['SellerFulfillmentOrderId']) + @api_action('Outbound', 30, 0.5) + def cancel_fulfillment_order(self, request, response, **kw): + """Requests that Amazon stop attempting to fulfill an existing + fulfillment order. 
+ """ + return self._post_request(request, kw, response) + + @api_action('Outbound', 2, 300, 'GetServiceStatus') + def get_outbound_service_status(self, request, response, **kw): + """Returns the operational status of the Fulfillment Outbound + API section. + """ + return self._post_request(request, kw, response) + + @requires(['CreatedAfter'], ['LastUpdatedAfter']) + @requires(['MarketplaceId']) + @exclusive(['CreatedAfter'], ['LastUpdatedAfter']) + @dependent('CreatedBefore', ['CreatedAfter']) + @exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId']) + @dependent('LastUpdatedBefore', ['LastUpdatedAfter']) + @exclusive(['CreatedAfter'], ['LastUpdatedBefore']) + @structured_objects('OrderTotal', 'ShippingAddress', + 'PaymentExecutionDetail') + @structured_lists('MarketplaceId.Id', 'OrderStatus.Status', + 'FulfillmentChannel.Channel', 'PaymentMethod.') + @api_action('Orders', 6, 60) + def list_orders(self, request, response, **kw): + """Returns a list of orders created or updated during a time + frame that you specify. + """ + toggle = set(('FulfillmentChannel.Channel.1', + 'OrderStatus.Status.1', 'PaymentMethod.1', + 'LastUpdatedAfter', 'LastUpdatedBefore')) + for do, dont in { + 'BuyerEmail': toggle.union(['SellerOrderId']), + 'SellerOrderId': toggle.union(['BuyerEmail']), + }.items(): + if do in kw and any(i in dont for i in kw): + message = "Don't include {0} when specifying " \ + "{1}".format(' or '.join(dont), do) + raise AssertionError(message) + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 6, 60) + def list_orders_by_next_token(self, request, response, **kw): + """Returns the next page of orders using the NextToken value + that was returned by your previous request to either + ListOrders or ListOrdersByNextToken. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderId']) + @structured_lists('AmazonOrderId.Id') + @api_action('Orders', 6, 60) + def get_order(self, request, response, **kw): + """Returns an order for each AmazonOrderId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderId']) + @api_action('Orders', 30, 2) + def list_order_items(self, request, response, **kw): + """Returns order item information for an AmazonOrderId that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Orders', 30, 2) + def list_order_items_by_next_token(self, request, response, **kw): + """Returns the next page of order items using the NextToken + value that was returned by your previous request to either + ListOrderItems or ListOrderItemsByNextToken. + """ + return self._post_request(request, kw, response) + + @api_action('Orders', 2, 300, 'GetServiceStatus') + def get_orders_service_status(self, request, response, **kw): + """Returns the operational status of the Orders API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Query']) + @api_action('Products', 20, 20) + def list_matching_products(self, request, response, **kw): + """Returns a list of products and their attributes, ordered + by relevancy, based on a search query that you specify. 
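+
+        Usage sketch (the MarketplaceId shown is the US marketplace):
+
+            result = conn.list_matching_products(
+                MarketplaceId='ATVPDKIKX0DER',
+                Query='0552149519')._result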
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 20) + def get_matching_product(self, request, response, **kw): + """Returns a list of products and their attributes, based on + a list of ASIN values that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'IdType', 'IdList']) + @structured_lists('IdList.Id') + @api_action('Products', 20, 20) + def get_matching_product_for_id(self, request, response, **kw): + """Returns a list of products and their attributes, based on + a list of Product IDs that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetCompetitivePricingForSKU') + def get_competitive_pricing_for_sku(self, request, response, **kw): + """Returns the current competitive pricing of a product, + based on the SellerSKUs and MarketplaceId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetCompetitivePricingForASIN') + def get_competitive_pricing_for_asin(self, request, response, **kw): + """Returns the current competitive pricing of a product, + based on the ASINs and MarketplaceId that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU') + def get_lowest_offer_listings_for_sku(self, request, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and SellerSKUs. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN') + def get_lowest_offer_listings_for_asin(self, request, response, **kw): + """Returns the lowest price offer listings for a specific + product by item condition and ASINs. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKU']) + @api_action('Products', 20, 20, 'GetProductCategoriesForSKU') + def get_product_categories_for_sku(self, request, response, **kw): + """Returns the product categories that a SellerSKU belongs to. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASIN']) + @api_action('Products', 20, 20, 'GetProductCategoriesForASIN') + def get_product_categories_for_asin(self, request, response, **kw): + """Returns the product categories that an ASIN belongs to. + """ + return self._post_request(request, kw, response) + + @api_action('Products', 2, 300, 'GetServiceStatus') + def get_products_service_status(self, request, response, **kw): + """Returns the operational status of the Products API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'SellerSKUList']) + @structured_lists('SellerSKUList.SellerSKU') + @api_action('Products', 20, 10, 'GetMyPriceForSKU') + def get_my_price_for_sku(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on SellerSKU. 
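+
+        SellerSKUList may be passed as a plain Python list; the
+        structured_lists decorator expands it into the numbered
+        SellerSKUList.SellerSKU.N parameters (SKUs are illustrative):
+
+            conn.get_my_price_for_sku(MarketplaceId='ATVPDKIKX0DER',
+                                      SellerSKUList=['SKU-0001', 'SKU-0002'])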
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'ASINList']) + @structured_lists('ASINList.ASIN') + @api_action('Products', 20, 10, 'GetMyPriceForASIN') + def get_my_price_for_asin(self, request, response, **kw): + """Returns pricing information for your own offer listings, based on ASIN. + """ + return self._post_request(request, kw, response) + + @api_action('Sellers', 15, 60) + def list_marketplace_participations(self, request, response, **kw): + """Returns a list of marketplaces that the seller submitting + the request can sell in, and a list of participations that + include seller-specific information in that marketplace. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Sellers', 15, 60) + def list_marketplace_participations_by_next_token(self, request, response, + **kw): + """Returns the next page of marketplaces and participations + using the NextToken value that was returned by your + previous request to either ListMarketplaceParticipations + or ListMarketplaceParticipationsByNextToken. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Recommendations', 5, 2) + def get_last_updated_time_for_recommendations(self, request, response, + **kw): + """Checks whether there are active recommendations for each category + for the given marketplace, and if there are, returns the time when + recommendations were last updated for each category. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @structured_lists('CategoryQueryList.CategoryQuery') + @api_action('Recommendations', 5, 2) + def list_recommendations(self, request, response, **kw): + """Returns your active recommendations for a specific category or for + all categories for a specific marketplace. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('Recommendations', 5, 2) + def list_recommendations_by_next_token(self, request, response, **kw): + """Returns the next page of recommendations using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @api_action('Recommendations', 2, 300, 'GetServiceStatus') + def get_recommendations_service_status(self, request, response, **kw): + """Returns the operational status of the Recommendations API section. + """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 15, 12) + def list_customers(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CustomerInfo', 50, 3) + def list_customers_by_next_token(self, request, response, **kw): + """Returns the next page of customers using the NextToken parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CustomerIdList']) + @structured_lists('CustomerIdList.CustomerId') + @api_action('CustomerInfo', 15, 12) + def get_customers_for_customer_id(self, request, response, **kw): + """Returns a list of customer accounts based on search criteria that + you specify. + """ + return self._post_request(request, kw, response) + + @api_action('CustomerInfo', 2, 300, 'GetServiceStatus') + def get_customerinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Customer Information API + section. 
+ """ + return self._post_request(request, kw, response) + + @requires(['DateRangeStart']) + @api_action('CartInfo', 15, 12) + def list_carts(self, request, response, **kw): + """Returns a list of shopping carts in your Webstore that were last + updated during the time range that you specify. + """ + return self._post_request(request, kw, response) + + @requires(['NextToken']) + @api_action('CartInfo', 50, 3) + def list_carts_by_next_token(self, request, response, **kw): + """Returns the next page of shopping carts using the NextToken + parameter. + """ + return self._post_request(request, kw, response) + + @requires(['CartIdList']) + @structured_lists('CartIdList.CartId') + @api_action('CartInfo', 15, 12) + def get_carts(self, request, response, **kw): + """Returns shopping carts based on the CartId values that you specify. + """ + return self._post_request(request, kw, response) + + @api_action('CartInfo', 2, 300, 'GetServiceStatus') + def get_cartinfo_service_status(self, request, response, **kw): + """Returns the operational status of the Cart Information API section. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def register_destination(self, request, response, **kw): + """Specifies a new destination where you want to receive notifications. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def deregister_destination(self, request, response, **kw): + """Removes an existing destination from the list of registered + destinations. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_registered_destinations(self, request, response, **kw): + """Lists all current destinations that you have registered. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def send_test_notification_to_destination(self, request, response, **kw): + """Sends a test notification to an existing destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def create_subscription(self, request, response, **kw): + """Creates a new subscription for the specified notification type + and destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def get_subscription(self, request, response, **kw): + """Gets the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'NotificationType', 'Destination']) + @structured_objects('Destination', members=True) + @api_action('Subscriptions', 25, 0.5) + def delete_subscription(self, request, response, **kw): + """Deletes the subscription for the specified notification type and + destination. 
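+
+        Destination may be passed as a plain dict; structured_objects
+        (with members=True) serializes nested lists using '.member.N'
+        indices (the values below are illustrative):
+
+            destination = {'DeliveryChannel': 'SQS',
+                           'AttributeList': [{'Key': 'sqsQueueUrl',
+                                              'Value': queue_url}]}
+            conn.delete_subscription(MarketplaceId=marketplace_id,
+                                     NotificationType='AnyOfferChanged',
+                                     Destination=destination)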
+ """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId']) + @api_action('Subscriptions', 25, 0.5) + def list_subscriptions(self, request, response, **kw): + """Returns a list of all your current subscriptions. + """ + return self._post_request(request, kw, response) + + @requires(['MarketplaceId', 'Subscription']) + @structured_objects('Subscription', members=True) + @api_action('Subscriptions', 25, 0.5) + def update_subscription(self, request, response, **kw): + """Updates the subscription for the specified notification type and + destination. + """ + return self._post_request(request, kw, response) + + @api_action('Subscriptions', 2, 300, 'GetServiceStatus') + def get_subscriptions_service_status(self, request, response, **kw): + """Returns the operational status of the Subscriptions API section. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes']) + @structured_objects('OrderReferenceAttributes') + @api_action('OffAmazonPayments', 10, 1) + def set_order_reference_details(self, request, response, **kw): + """Sets order reference details such as the order total and a + description for the order. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 20, 2) + def get_order_reference_details(self, request, response, **kw): + """Returns details about the Order Reference object and its current + state. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def confirm_order_reference(self, request, response, **kw): + """Confirms that the order reference is free of constraints and all + required information has been set on the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def cancel_order_reference(self, request, response, **kw): + """Cancel an order reference; all authorizations associated with + this order reference are also closed. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId']) + @api_action('OffAmazonPayments', 10, 1) + def close_order_reference(self, request, response, **kw): + """Confirms that an order reference has been fulfilled (fully + or partially) and that you do not expect to create any new + authorizations on this order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId', + 'AuthorizationAmount']) + @structured_objects('AuthorizationAmount') + @api_action('OffAmazonPayments', 10, 1) + def authorize(self, request, response, **kw): + """Reserves a specified amount against the payment method(s) stored in + the order reference. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 20, 2) + def get_authorization_details(self, request, response, **kw): + """Returns the status of a particular authorization and the total + amount captured on the authorization. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount']) + @structured_objects('CaptureAmount') + @api_action('OffAmazonPayments', 10, 1) + def capture(self, request, response, **kw): + """Captures funds from an authorized payment instrument. 
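+
+        Usage sketch (identifiers and amount are illustrative):
+
+            conn.capture(AmazonAuthorizationId=auth_id,
+                         CaptureReferenceId='capture-0001',
+                         CaptureAmount={'Amount': '19.99',
+                                        'CurrencyCode': 'USD'})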
+ """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId']) + @api_action('OffAmazonPayments', 20, 2) + def get_capture_details(self, request, response, **kw): + """Returns the status of a particular capture and the total amount + refunded on the capture. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonAuthorizationId']) + @api_action('OffAmazonPayments', 10, 1) + def close_authorization(self, request, response, **kw): + """Closes an authorization. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount']) + @structured_objects('RefundAmount') + @api_action('OffAmazonPayments', 10, 1) + def refund(self, request, response, **kw): + """Refunds a previously captured amount. + """ + return self._post_request(request, kw, response) + + @requires(['AmazonRefundId']) + @api_action('OffAmazonPayments', 20, 2) + def get_refund_details(self, request, response, **kw): + """Returns the status of a particular refund. + """ + return self._post_request(request, kw, response) + + @api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus') + def get_offamazonpayments_service_status(self, request, response, **kw): + """Returns the operational status of the Off-Amazon Payments API + section. + """ + return self._post_request(request, kw, response) diff --git a/ext/boto/mws/exception.py b/ext/boto/mws/exception.py new file mode 100644 index 0000000000..fba8a5d5d2 --- /dev/null +++ b/ext/boto/mws/exception.py @@ -0,0 +1,70 @@ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from boto.exception import BotoServerError +from boto.mws.response import ResponseFactory + + +class ResponseErrorFactory(ResponseFactory): + + def __call__(self, status, reason, body=None): + server = BotoServerError(status, reason, body=body) + supplied = self.find_element(server.error_code, '', ResponseError) + print(supplied.__name__) + return supplied(status, reason, body=body) + + +class ResponseError(BotoServerError): + """ + Undefined response error. 
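+
+    Errors raised by MWSConnection calls subclass this type, so a caller
+    can trap them generically (the call and identifier are illustrative):
+
+        try:
+            conn.get_report(ReportId=report_id)
+        except boto.mws.exception.ResponseError as error:
+            ...  # error.reason and error.message come from BotoServerError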
+ """ + retry = False + + def __repr__(self): + return '{0.__name__}({1.reason}: "{1.message}")' \ + .format(self.__class__, self) + + def __str__(self): + doc = self.__doc__ and self.__doc__.strip() + "\n" or '' + return '{1.__name__}: {0.reason} {2}\n{3}' \ + '{0.message}'.format(self, self.__class__, + self.retry and '(Retriable)' or '', doc) + + +class RetriableResponseError(ResponseError): + retry = True + + +class InvalidParameterValue(ResponseError): + """ + One or more parameter values in the request is invalid. + """ + + +class InvalidParameter(ResponseError): + """ + One or more parameters in the request is invalid. + """ + + +class InvalidAddress(ResponseError): + """ + Invalid address. + """ diff --git a/ext/boto/mws/response.py b/ext/boto/mws/response.py new file mode 100644 index 0000000000..7e2e23c07e --- /dev/null +++ b/ext/boto/mws/response.py @@ -0,0 +1,787 @@ +# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the fol- lowing conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
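+
+# The classes below describe MWS XML responses declaratively: a result
+# class names the nested elements it expects (Element, ElementList,
+# MemberList), and the SAX handler fills them in while parsing. A sketch
+# of extending this with a custom scope (names are illustrative):
+#
+#     class ListWidgetsResult(ResponseElement):
+#         WidgetData = MemberList()
+#
+#     conn = MWSConnection(factory_scopes=[{'ListWidgetsResult':
+#                                           ListWidgetsResult}])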
+from decimal import Decimal +from boto.compat import filter, map + + +class ComplexType(dict): + _value = 'Value' + + def __repr__(self): + return '{0}{1}'.format(getattr(self, self._value, None), self.copy()) + + def __str__(self): + return str(getattr(self, self._value, '')) + + +class DeclarativeType(object): + def __init__(self, _hint=None, **kw): + self._value = None + if _hint is not None: + self._hint = _hint + return + + class JITResponse(ResponseElement): + pass + self._hint = JITResponse + self._hint.__name__ = 'JIT_{0}/{1}'.format(self.__class__.__name__, + hex(id(self._hint))[2:]) + for name, value in kw.items(): + setattr(self._hint, name, value) + + def __repr__(self): + parent = getattr(self, '_parent', None) + return '<{0}_{1}/{2}_{3}>'.format(self.__class__.__name__, + parent and parent._name or '?', + getattr(self, '_name', '?'), + hex(id(self.__class__))) + + def setup(self, parent, name, *args, **kw): + self._parent = parent + self._name = name + self._clone = self.__class__(_hint=self._hint) + self._clone._parent = parent + self._clone._name = name + setattr(self._parent, self._name, self._clone) + + def start(self, *args, **kw): + raise NotImplementedError + + def end(self, *args, **kw): + raise NotImplementedError + + def teardown(self, *args, **kw): + setattr(self._parent, self._name, self._value) + + +class Element(DeclarativeType): + def start(self, *args, **kw): + self._value = self._hint(parent=self._parent, **kw) + return self._value + + def end(self, *args, **kw): + pass + + +class SimpleList(DeclarativeType): + def __init__(self, *args, **kw): + super(SimpleList, self).__init__(*args, **kw) + self._value = [] + + def start(self, *args, **kw): + return None + + def end(self, name, value, *args, **kw): + self._value.append(value) + + +class ElementList(SimpleList): + def start(self, *args, **kw): + value = self._hint(parent=self._parent, **kw) + self._value.append(value) + return value + + def end(self, *args, **kw): + pass + + +class MemberList(Element): + def __init__(self, _member=None, _hint=None, *args, **kw): + message = 'Invalid `member` specification in {0}'.format(self.__class__.__name__) + assert 'member' not in kw, message + if _member is None: + if _hint is None: + super(MemberList, self).__init__(*args, member=ElementList(**kw)) + else: + super(MemberList, self).__init__(_hint=_hint) + else: + if _hint is None: + if issubclass(_member, DeclarativeType): + member = _member(**kw) + else: + member = ElementList(_member, **kw) + super(MemberList, self).__init__(*args, member=member) + else: + message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__, + _hint) + raise AssertionError(message) + + def teardown(self, *args, **kw): + if self._value is None: + self._value = [] + else: + if isinstance(self._value.member, DeclarativeType): + self._value.member = [] + self._value = self._value.member + super(MemberList, self).teardown(*args, **kw) + + +class ResponseFactory(object): + def __init__(self, scopes=None): + self.scopes = [] if scopes is None else scopes + + def element_factory(self, name, parent): + class DynamicElement(parent): + _name = name + setattr(DynamicElement, '__name__', str(name)) + return DynamicElement + + def search_scopes(self, key): + for scope in self.scopes: + if hasattr(scope, key): + return getattr(scope, key) + if hasattr(scope, '__getitem__'): + if key in scope: + return scope[key] + + def find_element(self, action, suffix, parent): + element = self.search_scopes(action + suffix) + if element is not None: + return 
element + if action.endswith('ByNextToken'): + element = self.search_scopes(action[:-len('ByNextToken')] + suffix) + if element is not None: + return self.element_factory(action + suffix, element) + return self.element_factory(action + suffix, parent) + + def __call__(self, action, connection=None): + response = self.find_element(action, 'Response', Response) + if not hasattr(response, action + 'Result'): + result = self.find_element(action, 'Result', ResponseElement) + setattr(response, action + 'Result', Element(result)) + return response(connection=connection) + + +def strip_namespace(func): + def wrapper(self, name, *args, **kw): + if self._namespace is not None: + if name.startswith(self._namespace + ':'): + name = name[len(self._namespace + ':'):] + return func(self, name, *args, **kw) + return wrapper + + +class ResponseElement(dict): + _override = {} + _name = None + _namespace = None + + def __init__(self, connection=None, name=None, parent=None, attrs=None): + if parent is not None and self._namespace is None: + self._namespace = parent._namespace + if connection is not None: + self._connection = connection + self._name = name or self._name or self.__class__.__name__ + self._declared('setup', attrs=attrs) + dict.__init__(self, attrs and attrs.copy() or {}) + + def _declared(self, op, **kw): + def inherit(obj): + result = {} + for cls in getattr(obj, '__bases__', ()): + result.update(inherit(cls)) + result.update(obj.__dict__) + return result + + scope = inherit(self.__class__) + scope.update(self.__dict__) + declared = lambda attr: isinstance(attr[1], DeclarativeType) + for name, node in filter(declared, scope.items()): + getattr(node, op)(self, name, parentname=self._name, **kw) + + @property + def connection(self): + return self._connection + + def __repr__(self): + render = lambda pair: '{0!s}: {1!r}'.format(*pair) + do_show = lambda pair: not pair[0].startswith('_') + attrs = filter(do_show, self.__dict__.items()) + name = self.__class__.__name__ + if name.startswith('JIT_'): + name = '^{0}^'.format(self._name or '') + return '{0}{1!r}({2})'.format( + name, self.copy(), ', '.join(map(render, attrs))) + + def _type_for(self, name, attrs): + return self._override.get(name, globals().get(name, ResponseElement)) + + @strip_namespace + def startElement(self, name, attrs, connection): + attribute = getattr(self, name, None) + if isinstance(attribute, DeclarativeType): + return attribute.start(name=name, attrs=attrs, + connection=connection) + elif attrs.getLength(): + setattr(self, name, ComplexType(attrs.copy())) + else: + return None + + @strip_namespace + def endElement(self, name, value, connection): + attribute = getattr(self, name, None) + if name == self._name: + self._declared('teardown') + elif isinstance(attribute, DeclarativeType): + attribute.end(name=name, value=value, connection=connection) + elif isinstance(attribute, ComplexType): + setattr(attribute, attribute._value, value) + else: + setattr(self, name, value) + + +class Response(ResponseElement): + ResponseMetadata = Element() + + @strip_namespace + def startElement(self, name, attrs, connection): + if name == self._name: + self.update(attrs) + else: + return super(Response, self).startElement(name, attrs, connection) + + @property + def _result(self): + return getattr(self, self._action + 'Result', None) + + @property + def _action(self): + return (self._name or self.__class__.__name__)[:-len('Response')] + + +class ResponseResultList(Response): + _ResultClass = ResponseElement + + def __init__(self, *args, 
**kw): + setattr(self, self._action + 'Result', ElementList(self._ResultClass)) + super(ResponseResultList, self).__init__(*args, **kw) + + +class FeedSubmissionInfo(ResponseElement): + pass + + +class SubmitFeedResult(ResponseElement): + FeedSubmissionInfo = Element(FeedSubmissionInfo) + + +class GetFeedSubmissionListResult(ResponseElement): + FeedSubmissionInfo = ElementList(FeedSubmissionInfo) + + +class GetFeedSubmissionCountResult(ResponseElement): + pass + + +class CancelFeedSubmissionsResult(GetFeedSubmissionListResult): + pass + + +class GetServiceStatusResult(ResponseElement): + Messages = Element(Messages=ElementList()) + + +class ReportRequestInfo(ResponseElement): + pass + + +class RequestReportResult(ResponseElement): + ReportRequestInfo = Element() + + +class GetReportRequestListResult(RequestReportResult): + ReportRequestInfo = ElementList() + + +class CancelReportRequestsResult(RequestReportResult): + pass + + +class GetReportListResult(ResponseElement): + ReportInfo = ElementList() + + +class ManageReportScheduleResult(ResponseElement): + ReportSchedule = Element() + + +class GetReportScheduleListResult(ManageReportScheduleResult): + pass + + +class UpdateReportAcknowledgementsResult(GetReportListResult): + pass + + +class CreateInboundShipmentPlanResult(ResponseElement): + InboundShipmentPlans = MemberList(ShipToAddress=Element(), + Items=MemberList()) + + +class ListInboundShipmentsResult(ResponseElement): + ShipmentData = MemberList(ShipFromAddress=Element()) + + +class ListInboundShipmentItemsResult(ResponseElement): + ItemData = MemberList() + + +class ListInventorySupplyResult(ResponseElement): + InventorySupplyList = MemberList( + EarliestAvailability=Element(), + SupplyDetail=MemberList( + EarliestAvailableToPick=Element(), + LatestAvailableToPick=Element(), + ) + ) + + +class ComplexAmount(ResponseElement): + _amount = 'Value' + + def __repr__(self): + return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount)) + + def __float__(self): + return float(getattr(self, self._amount)) + + def __str__(self): + return str(getattr(self, self._amount)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('CurrencyCode', self._amount): + message = 'Unrecognized tag {0} in ComplexAmount'.format(name) + raise AssertionError(message) + return super(ComplexAmount, self).startElement(name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == self._amount: + value = Decimal(value) + super(ComplexAmount, self).endElement(name, value, connection) + + +class ComplexMoney(ComplexAmount): + _amount = 'Amount' + + +class ComplexWeight(ResponseElement): + def __repr__(self): + return '{0} {1}'.format(self.Value, self.Unit) + + def __float__(self): + return float(self.Value) + + def __str__(self): + return str(self.Value) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in ('Unit', 'Value'): + message = 'Unrecognized tag {0} in ComplexWeight'.format(name) + raise AssertionError(message) + return super(ComplexWeight, self).startElement(name, attrs, connection) + + @strip_namespace + def endElement(self, name, value, connection): + if name == 'Value': + value = Decimal(value) + super(ComplexWeight, self).endElement(name, value, connection) + + +class Dimension(ComplexType): + _value = 'Value' + + +class ComplexDimensions(ResponseElement): + _dimensions = ('Height', 'Length', 'Width', 'Weight') + + def __repr__(self): + values = [getattr(self, key, None) for 
key in self._dimensions] + values = filter(None, values) + return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values)) + + @strip_namespace + def startElement(self, name, attrs, connection): + if name not in self._dimensions: + message = 'Unrecognized tag {0} in ComplexDimensions'.format(name) + raise AssertionError(message) + setattr(self, name, Dimension(attrs.copy())) + + @strip_namespace + def endElement(self, name, value, connection): + if name in self._dimensions: + value = Decimal(value or '0') + ResponseElement.endElement(self, name, value, connection) + + +class FulfillmentPreviewItem(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + + +class FulfillmentPreview(ResponseElement): + EstimatedShippingWeight = Element(ComplexWeight) + EstimatedFees = MemberList(Amount=Element(ComplexAmount)) + UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem) + FulfillmentPreviewShipments = MemberList( + FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem), + ) + + +class GetFulfillmentPreviewResult(ResponseElement): + FulfillmentPreviews = MemberList(FulfillmentPreview) + + +class FulfillmentOrder(ResponseElement): + DestinationAddress = Element() + NotificationEmailList = MemberList(SimpleList) + + +class GetFulfillmentOrderResult(ResponseElement): + FulfillmentOrder = Element(FulfillmentOrder) + FulfillmentShipment = MemberList( + FulfillmentShipmentItem=MemberList(), + FulfillmentShipmentPackage=MemberList(), + ) + FulfillmentOrderItem = MemberList() + + +class ListAllFulfillmentOrdersResult(ResponseElement): + FulfillmentOrders = MemberList(FulfillmentOrder) + + +class GetPackageTrackingDetailsResult(ResponseElement): + ShipToAddress = Element() + TrackingEvents = MemberList(EventAddress=Element()) + + +class Image(ResponseElement): + pass + + +class AttributeSet(ResponseElement): + ItemDimensions = Element(ComplexDimensions) + ListPrice = Element(ComplexMoney) + PackageDimensions = Element(ComplexDimensions) + SmallImage = Element(Image) + + +class ItemAttributes(AttributeSet): + Languages = Element(Language=ElementList()) + + def __init__(self, *args, **kw): + names = ('Actor', 'Artist', 'Author', 'Creator', 'Director', + 'Feature', 'Format', 'GemType', 'MaterialType', + 'MediaType', 'OperatingSystem', 'Platform') + for name in names: + setattr(self, name, SimpleList()) + super(ItemAttributes, self).__init__(*args, **kw) + + +class VariationRelationship(ResponseElement): + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + GemType = SimpleList() + MaterialType = SimpleList() + OperatingSystem = SimpleList() + + +class Price(ResponseElement): + LandedPrice = Element(ComplexMoney) + ListingPrice = Element(ComplexMoney) + Shipping = Element(ComplexMoney) + + +class CompetitivePrice(ResponseElement): + Price = Element(Price) + + +class CompetitivePriceList(ResponseElement): + CompetitivePrice = ElementList(CompetitivePrice) + + +class CompetitivePricing(ResponseElement): + CompetitivePrices = Element(CompetitivePriceList) + NumberOfOfferListings = SimpleList() + TradeInValue = Element(ComplexMoney) + + +class SalesRank(ResponseElement): + pass + + +class LowestOfferListing(ResponseElement): + Qualifiers = Element(ShippingTime=Element()) + Price = Element(Price) + + +class Offer(ResponseElement): + BuyingPrice = Element(Price) + RegularPrice = Element(ComplexMoney) + + +class Product(ResponseElement): + _namespace = 'ns2' + Identifiers = Element(MarketplaceASIN=Element(), + SKUIdentifier=Element()) + AttributeSets = Element( + 
ItemAttributes=ElementList(ItemAttributes), + ) + Relationships = Element( + VariationParent=ElementList(VariationRelationship), + ) + CompetitivePricing = ElementList(CompetitivePricing) + SalesRankings = Element( + SalesRank=ElementList(SalesRank), + ) + LowestOfferListings = Element( + LowestOfferListing=ElementList(LowestOfferListing), + ) + Offers = Element( + Offer=ElementList(Offer), + ) + + +class ListMatchingProductsResult(ResponseElement): + Products = Element(Product=ElementList(Product)) + + +class ProductsBulkOperationResult(ResponseElement): + Product = Element(Product) + Error = Element() + + +class ProductsBulkOperationResponse(ResponseResultList): + _ResultClass = ProductsBulkOperationResult + + +class GetMatchingProductResponse(ProductsBulkOperationResponse): + pass + + +class GetMatchingProductForIdResult(ListMatchingProductsResult): + pass + + +class GetMatchingProductForIdResponse(ResponseResultList): + _ResultClass = GetMatchingProductForIdResult + + +class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForSKUResponse(ProductsBulkOperationResponse): + pass + + +class GetMyPriceForASINResponse(ProductsBulkOperationResponse): + pass + + +class ProductCategory(ResponseElement): + + def __init__(self, *args, **kw): + setattr(self, 'Parent', Element(ProductCategory)) + super(ProductCategory, self).__init__(*args, **kw) + + +class GetProductCategoriesResult(ResponseElement): + Self = ElementList(ProductCategory) + + +class GetProductCategoriesForSKUResult(GetProductCategoriesResult): + pass + + +class GetProductCategoriesForASINResult(GetProductCategoriesResult): + pass + + +class Order(ResponseElement): + OrderTotal = Element(ComplexMoney) + ShippingAddress = Element() + PaymentExecutionDetail = Element( + PaymentExecutionDetailItem=ElementList( + PaymentExecutionDetailItem=Element( + Payment=Element(ComplexMoney) + ) + ) + ) + + +class ListOrdersResult(ResponseElement): + Orders = Element(Order=ElementList(Order)) + + +class GetOrderResult(ListOrdersResult): + pass + + +class OrderItem(ResponseElement): + ItemPrice = Element(ComplexMoney) + ShippingPrice = Element(ComplexMoney) + GiftWrapPrice = Element(ComplexMoney) + ItemTax = Element(ComplexMoney) + ShippingTax = Element(ComplexMoney) + GiftWrapTax = Element(ComplexMoney) + ShippingDiscount = Element(ComplexMoney) + PromotionDiscount = Element(ComplexMoney) + PromotionIds = SimpleList() + CODFee = Element(ComplexMoney) + CODFeeDiscount = Element(ComplexMoney) + + +class ListOrderItemsResult(ResponseElement): + OrderItems = Element(OrderItem=ElementList(OrderItem)) + + +class ListMarketplaceParticipationsResult(ResponseElement): + ListParticipations = Element(Participation=ElementList()) + ListMarketplaces = Element(Marketplace=ElementList()) + + +class ListRecommendationsResult(ResponseElement): + ListingQualityRecommendations = MemberList(ItemIdentifier=Element()) + + +class Customer(ResponseElement): + PrimaryContactInfo = Element() + ShippingAddressList = Element(ShippingAddress=ElementList()) + AssociatedMarketplaces = Element(MarketplaceDomain=ElementList()) + + +class ListCustomersResult(ResponseElement): + CustomerList = Element(Customer=ElementList(Customer)) + + +class 
GetCustomersForCustomerIdResult(ListCustomersResult):
+    pass
+
+
+class CartItem(ResponseElement):
+    CurrentPrice = Element(ComplexMoney)
+    SalePrice = Element(ComplexMoney)
+
+
+class Cart(ResponseElement):
+    ActiveCartItemList = Element(CartItem=ElementList(CartItem))
+    SavedCartItemList = Element(CartItem=ElementList(CartItem))
+
+
+class ListCartsResult(ResponseElement):
+    CartList = Element(Cart=ElementList(Cart))
+
+
+class GetCartsResult(ListCartsResult):
+    pass
+
+
+class Destination(ResponseElement):
+    AttributeList = MemberList()
+
+
+class ListRegisteredDestinationsResult(ResponseElement):
+    DestinationList = MemberList(Destination)
+
+
+class Subscription(ResponseElement):
+    Destination = Element(Destination)
+
+
+class GetSubscriptionResult(ResponseElement):
+    Subscription = Element(Subscription)
+
+
+class ListSubscriptionsResult(ResponseElement):
+    SubscriptionList = MemberList(Subscription)
+
+
+class OrderReferenceDetails(ResponseElement):
+    Buyer = Element()
+    OrderTotal = Element(ComplexMoney)
+    Destination = Element(PhysicalDestination=Element())
+    SellerOrderAttributes = Element()
+    OrderReferenceStatus = Element()
+    Constraints = ElementList()
+
+
+class SetOrderReferenceDetailsResult(ResponseElement):
+    OrderReferenceDetails = Element(OrderReferenceDetails)
+
+
+class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult):
+    pass
+
+
+class AuthorizationDetails(ResponseElement):
+    AuthorizationAmount = Element(ComplexMoney)
+    CapturedAmount = Element(ComplexMoney)
+    AuthorizationFee = Element(ComplexMoney)
+    AuthorizationStatus = Element()
+
+
+class AuthorizeResult(ResponseElement):
+    AuthorizationDetails = Element(AuthorizationDetails)
+
+
+class GetAuthorizationDetailsResult(AuthorizeResult):
+    pass
+
+
+class CaptureDetails(ResponseElement):
+    CaptureAmount = Element(ComplexMoney)
+    RefundedAmount = Element(ComplexMoney)
+    CaptureFee = Element(ComplexMoney)
+    CaptureStatus = Element()
+
+
+class CaptureResult(ResponseElement):
+    CaptureDetails = Element(CaptureDetails)
+
+
+class GetCaptureDetailsResult(CaptureResult):
+    pass
+
+
+class RefundDetails(ResponseElement):
+    RefundAmount = Element(ComplexMoney)
+    FeeRefunded = Element(ComplexMoney)
+    RefundStatus = Element()
+
+
+class RefundResult(ResponseElement):
+    RefundDetails = Element(RefundDetails)
+
+
+class GetRefundDetailsResult(RefundResult):
+    pass
diff --git a/ext/boto/opsworks/__init__.py b/ext/boto/opsworks/__init__.py
new file mode 100644
index 0000000000..c54a1b1b3e
--- /dev/null
+++ b/ext/boto/opsworks/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon OpsWorks service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.opsworks.layer1 import OpsWorksConnection + return get_regions('opsworks', connection_cls=OpsWorksConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.opsworks.layer1 import OpsWorksConnection + return connect('opsworks', region_name, + connection_cls=OpsWorksConnection, **kw_params) diff --git a/ext/boto/opsworks/exceptions.py b/ext/boto/opsworks/exceptions.py new file mode 100644 index 0000000000..da23e48521 --- /dev/null +++ b/ext/boto/opsworks/exceptions.py @@ -0,0 +1,30 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ResourceNotFoundException(JSONResponseError): + pass + + +class ValidationException(JSONResponseError): + pass diff --git a/ext/boto/opsworks/layer1.py b/ext/boto/opsworks/layer1.py new file mode 100644 index 0000000000..8894d1c30f --- /dev/null +++ b/ext/boto/opsworks/layer1.py @@ -0,0 +1,3094 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.opsworks import exceptions + + +class OpsWorksConnection(AWSQueryConnection): + """ + AWS OpsWorks + Welcome to the AWS OpsWorks API Reference . This guide provides + descriptions, syntax, and usage examples about AWS OpsWorks + actions and data types, including common parameters and error + codes. + + AWS OpsWorks is an application management service that provides an + integrated experience for overseeing the complete application + lifecycle. For information about this product, go to the `AWS + OpsWorks`_ details page. + + **SDKs and CLI** + + The most common way to use the AWS OpsWorks API is by using the + AWS Command Line Interface (CLI) or by using one of the AWS SDKs + to implement applications in your preferred language. For more + information, see: + + + + `AWS CLI`_ + + `AWS SDK for Java`_ + + `AWS SDK for .NET`_ + + `AWS SDK for PHP 2`_ + + `AWS SDK for Ruby`_ + + `AWS SDK for Node.js`_ + + `AWS SDK for Python(Boto)`_ + + + **Endpoints** + + AWS OpsWorks supports only one endpoint, opsworks.us- + east-1.amazonaws.com (HTTPS), so you must connect to that + endpoint. You can then use the API to direct AWS OpsWorks to + create stacks in any AWS Region. + + **Chef Versions** + + When you call CreateStack, CloneStack, or UpdateStack we recommend + you use the `ConfigurationManager` parameter to specify the Chef + version, 0.9, 11.4, or 11.10. The default value is currently + 11.10. For more information, see `Chef Versions`_. + + You can still specify Chef 0.9 for your stack, but new features + are not available for Chef 0.9 stacks, and support is scheduled to + end on July 24, 2014. We do not recommend using Chef 0.9 for new + stacks, and we recommend migrating your existing Chef 0.9 stacks + to Chef 11.10 as soon as possible. + """ + APIVersion = "2013-02-18" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com" + ServiceName = "OpsWorks" + TargetPrefix = "OpsWorks_20130218" + ResponseError = JSONResponseError + + _faults = { + "ResourceNotFoundException": exceptions.ResourceNotFoundException, + "ValidationException": exceptions.ValidationException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(OpsWorksConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def assign_instance(self, instance_id, layer_ids): + """ + Assign a registered instance to a custom layer. You cannot use + this action with instances that were created with AWS + OpsWorks. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. 
+ + :type layer_ids: list + :param layer_ids: The layer ID, which must correspond to a custom + layer. You cannot assign a registered instance to a built-in layer. + + """ + params = { + 'InstanceId': instance_id, + 'LayerIds': layer_ids, + } + return self.make_request(action='AssignInstance', + body=json.dumps(params)) + + def assign_volume(self, volume_id, instance_id=None): + """ + Assigns one of the stack's registered Amazon EBS volumes to a + specified instance. The volume must first be registered with + the stack by calling RegisterVolume. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type volume_id: string + :param volume_id: The volume ID. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'VolumeId': volume_id, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssignVolume', + body=json.dumps(params)) + + def associate_elastic_ip(self, elastic_ip, instance_id=None): + """ + Associates one of the stack's registered Elastic IP addresses + with a specified instance. The address must first be + registered with the stack by calling RegisterElasticIp. For + more information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'ElasticIp': elastic_ip, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssociateElasticIp', + body=json.dumps(params)) + + def attach_elastic_load_balancer(self, elastic_load_balancer_name, + layer_id): + """ + Attaches an Elastic Load Balancing load balancer to a + specified layer. For more information, see `Elastic Load + Balancing`_. + + + You must create the Elastic Load Balancing instance + separately, by using the Elastic Load Balancing console, API, + or CLI. For more information, see ` Elastic Load Balancing + Developer Guide`_. + + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_load_balancer_name: string + :param elastic_load_balancer_name: The Elastic Load Balancing + instance's name. + + :type layer_id: string + :param layer_id: The ID of the layer that the Elastic Load Balancing + instance is to be attached to. 
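+
+        Example (an illustrative sketch, not part of the upstream docs;
+        the load balancer name and layer ID below are placeholders, and
+        `conn` is assumed to be an OpsWorksConnection, e.g. from
+        `boto.opsworks.connect_to_region('us-east-1')` with AWS
+        credentials configured)::
+
+            conn.attach_elastic_load_balancer('my-load-balancer',
+                                              'SOME-LAYER-ID')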
+ + """ + params = { + 'ElasticLoadBalancerName': elastic_load_balancer_name, + 'LayerId': layer_id, + } + return self.make_request(action='AttachElasticLoadBalancer', + body=json.dumps(params)) + + def clone_stack(self, source_stack_id, service_role_arn, name=None, + region=None, vpc_id=None, attributes=None, + default_instance_profile_arn=None, default_os=None, + hostname_theme=None, default_availability_zone=None, + default_subnet_id=None, custom_json=None, + configuration_manager=None, chef_configuration=None, + use_custom_cookbooks=None, + use_opsworks_security_groups=None, + custom_cookbooks_source=None, default_ssh_key_name=None, + clone_permissions=None, clone_app_ids=None, + default_root_device_type=None): + """ + Creates a clone of a specified stack. For more information, + see `Clone a Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type source_stack_id: string + :param source_stack_id: The source stack ID. + + :type name: string + :param name: The cloned stack name. + + :type region: string + :param region: The cloned stack AWS region, such as "us-east-1". For + more information about AWS regions, see `Regions and Endpoints`_. + + :type vpc_id: string + :param vpc_id: The ID of the VPC that the cloned stack is to be + launched into. It must be in the specified region. All instances + are launched into this VPC, and you cannot change the ID later. + + + If your account supports EC2 Classic, the default value is no VPC. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. + + + If the VPC ID corresponds to a default VPC and you have specified + either the `DefaultAvailabilityZone` or the `DefaultSubnetId` + parameter only, AWS OpsWorks infers the value of the other + parameter. If you specify neither parameter, AWS OpsWorks sets + these parameters to the first valid Availability Zone for the + specified region and the corresponding default VPC subnet ID, + respectively. + + If you specify a nondefault VPC ID, note the following: + + + + It must belong to a VPC in your account that is in the specified + region. + + You must specify a value for `DefaultSubnetId`. + + + For more information on how to use AWS OpsWorks with a VPC, see + `Running a Stack in a VPC`_. For more information on default VPC + and EC2 Classic, see `Supported Platforms`_. + + :type attributes: map + :param attributes: A list of stack attributes and values as key/value + pairs to be added to the cloned stack. + + :type service_role_arn: string + :param service_role_arn: + The stack AWS Identity and Access Management (IAM) role, which allows + AWS OpsWorks to work with AWS resources on your behalf. You must + set this parameter to the Amazon Resource Name (ARN) for an + existing IAM role. If you create a stack by using the AWS OpsWorks + console, it creates the role for you. You can obtain an existing + stack's IAM ARN programmatically by calling DescribePermissions. + For more information about IAM ARNs, see `Using Identifiers`_. + + + You must set this parameter to a valid service role ARN or the action + will fail; there is no default value. You can specify the source + stack's service role ARN, if you prefer, but you must do so + explicitly. 
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's operating system, which must be set to
+            one of the following.
+
+        + Standard operating systems: an Amazon Linux version such as `Amazon
+              Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+              when you create instances.
+
+
+        The default option is the current Amazon Linux version.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            `Layer_Dependent`, which creates host names by appending integers
+            to the layer's short name. The other themes are:
+
+        + `Baked_Goods`
+        + `Clouds`
+        + `European_Cities`
+        + `Fruits`
+        + `Greek_Deities`
+        + `Legendary_Creatures_from_Japan`
+        + `Planets_and_Moons`
+        + `Roman_Deities`
+        + `Scottish_Islands`
+        + `US_Cities`
+        + `Wild_Cats`
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+            returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The cloned stack's default
+            Availability Zone, which must be in the specified region. For more
+            information, see `Regions and Endpoints`_. If you also specify a
+            value for `DefaultSubnetId`, the subnet must be in the same zone.
+            For more information, see the `VpcId` parameter description.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default VPC subnet ID. This
+            parameter is required if you specify a value for the `VpcId`
+            parameter. All instances are launched into this subnet unless you
+            specify otherwise when you create the instance. If you also specify
+            a value for `DefaultAvailabilityZone`, the subnet must be in that
+            zone. For information on default values and when this parameter is
+            required, see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"'.:
+            `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+        For more information on custom JSON, see `Use Custom JSON to Modify the
+            Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you clone
+            a stack we recommend that you use the configuration manager to
+            specify the Chef version, 0.9, 11.4, or 11.10. The default value is
+            currently 11.4.
+
+        :type chef_configuration: dict
+        :param chef_configuration: A `ChefConfiguration` object that specifies
+            whether to enable Berkshelf and the Berkshelf version on Chef 11.10
+            stacks. For more information, see `Create a New Stack`_.
+
+        :type use_custom_cookbooks: boolean
+        :param use_custom_cookbooks: Whether to use custom cookbooks.
+
+        :type use_opsworks_security_groups: boolean
+        :param use_opsworks_security_groups: Whether to associate the AWS
+            OpsWorks built-in security groups with the stack's layers.
+ AWS OpsWorks provides a standard set of built-in security groups, one + for each layer, which are associated with layers by default. With + `UseOpsworksSecurityGroups` you can instead provide your own custom + security groups. `UseOpsworksSecurityGroups` has the following + settings: + + + + True - AWS OpsWorks automatically associates the appropriate built-in + security group with each layer (default setting). You can associate + additional security groups with a layer after you create it but you + cannot delete the built-in security group. + + False - AWS OpsWorks does not associate built-in security groups with + layers. You must create appropriate EC2 security groups and + associate a security group with each layer that you create. + However, you can still manually associate a built-in security group + with a layer on creation; custom security groups are required only + for those layers that need custom settings. + + + For more information, see `Create a New Stack`_. + + :type custom_cookbooks_source: dict + :param custom_cookbooks_source: Contains the information required to + retrieve an app or cookbook from a repository. For more + information, see `Creating Apps`_ or `Custom Recipes and + Cookbooks`_. + + :type default_ssh_key_name: string + :param default_ssh_key_name: A default SSH key for the stack instances. + You can override this value when you create or update an instance. + + :type clone_permissions: boolean + :param clone_permissions: Whether to clone the source stack's + permissions. + + :type clone_app_ids: list + :param clone_app_ids: A list of source stack app IDs to be included in + the cloned stack. + + :type default_root_device_type: string + :param default_root_device_type: The default root device type. This + value is used by default for all instances in the cloned stack, but + you can override it when you create an instance. For more + information, see `Storage for the Root Device`_. 
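+
+        Example (an illustrative sketch, not part of the upstream docs;
+        the stack ID and role ARN below are placeholders, and `conn` is
+        assumed to be an OpsWorksConnection)::
+
+            conn.clone_stack(
+                source_stack_id='SOURCE-STACK-ID',
+                service_role_arn='arn:aws:iam::111122223333:role/aws-opsworks-service-role',
+                name='my-cloned-stack',
+            )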
+ + """ + params = { + 'SourceStackId': source_stack_id, + 'ServiceRoleArn': service_role_arn, + } + if name is not None: + params['Name'] = name + if region is not None: + params['Region'] = region + if vpc_id is not None: + params['VpcId'] = vpc_id + if attributes is not None: + params['Attributes'] = attributes + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if clone_permissions is not None: + params['ClonePermissions'] = clone_permissions + if clone_app_ids is not None: + params['CloneAppIds'] = clone_app_ids + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='CloneStack', + body=json.dumps(params)) + + def create_app(self, stack_id, name, type, shortname=None, + description=None, data_sources=None, app_source=None, + domains=None, enable_ssl=None, ssl_configuration=None, + attributes=None, environment=None): + """ + Creates an app for a specified stack. For more information, + see `Creating Apps`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type shortname: string + :param shortname: The app's short name. + + :type name: string + :param name: The app name. + + :type description: string + :param description: A description of the app. + + :type data_sources: list + :param data_sources: The app's data source. + + :type type: string + :param type: The app type. Each supported type is associated with a + particular layer. For example, PHP applications are associated with + a PHP layer. AWS OpsWorks deploys an application to those instances + that are members of the corresponding layer. + + :type app_source: dict + :param app_source: A `Source` object that specifies the app repository. + + :type domains: list + :param domains: The app virtual host settings, with multiple domains + separated by commas. For example: `'www.example.com, example.com'` + + :type enable_ssl: boolean + :param enable_ssl: Whether to enable SSL for the app. + + :type ssl_configuration: dict + :param ssl_configuration: An `SslConfiguration` object with the SSL + configuration. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. 
+ + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instance. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + + """ + params = {'StackId': stack_id, 'Name': name, 'Type': type, } + if shortname is not None: + params['Shortname'] = shortname + if description is not None: + params['Description'] = description + if data_sources is not None: + params['DataSources'] = data_sources + if app_source is not None: + params['AppSource'] = app_source + if domains is not None: + params['Domains'] = domains + if enable_ssl is not None: + params['EnableSsl'] = enable_ssl + if ssl_configuration is not None: + params['SslConfiguration'] = ssl_configuration + if attributes is not None: + params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment + return self.make_request(action='CreateApp', + body=json.dumps(params)) + + def create_deployment(self, stack_id, command, app_id=None, + instance_ids=None, comment=None, custom_json=None): + """ + Runs deployment or stack commands. For more information, see + `Deploying Apps`_ and `Run Stack Commands`_. + + **Required Permissions**: To use this action, an IAM user must + have a Deploy or Manage permissions level for the stack, or an + attached policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type app_id: string + :param app_id: The app ID. This parameter is required for app + deployments, but not for other deployment commands. + + :type instance_ids: list + :param instance_ids: The instance IDs for the deployment targets. + + :type command: dict + :param command: A `DeploymentCommand` object that specifies the + deployment command and any associated arguments. + + :type comment: string + :param comment: A user-defined comment. + + :type custom_json: string + :param custom_json: A string that contains user-defined, custom JSON. + It is used to override the corresponding default stack + configuration JSON values. The string should be in the following + format and must escape characters such as '"'.: + `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"` + + For more information on custom JSON, see `Use Custom JSON to Modify the + Stack Configuration JSON`_. + + """ + params = {'StackId': stack_id, 'Command': command, } + if app_id is not None: + params['AppId'] = app_id + if instance_ids is not None: + params['InstanceIds'] = instance_ids + if comment is not None: + params['Comment'] = comment + if custom_json is not None: + params['CustomJson'] = custom_json + return self.make_request(action='CreateDeployment', + body=json.dumps(params)) + + def create_instance(self, stack_id, layer_ids, instance_type, + auto_scaling_type=None, hostname=None, os=None, + ami_id=None, ssh_key_name=None, + availability_zone=None, virtualization_type=None, + subnet_id=None, architecture=None, + root_device_type=None, install_updates_on_boot=None, + ebs_optimized=None): + """ + Creates an instance in a specified stack. For more + information, see `Adding an Instance to a Layer`_. 
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type layer_ids: list
+        :param layer_ids: An array that contains the instance layer IDs.
+
+        :type instance_type: string
+        :param instance_type: The instance type. AWS OpsWorks supports all
+            instance types except Cluster Compute, Cluster GPU, and High Memory
+            Cluster. For more information, see `Instance Families and Types`_.
+            The parameter values that you use to specify the various types are
+            in the API Name column of the Available Instance Types table.
+
+        :type auto_scaling_type: string
+        :param auto_scaling_type: For load-based or time-based instances, the
+            type.
+
+        :type hostname: string
+        :param hostname: The instance host name.
+
+        :type os: string
+        :param os: The instance's operating system, which must be set to one of
+            the following.
+
+        + Standard operating systems: an Amazon Linux version such as `Amazon
+              Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`
+
+
+        The default option is the current Amazon Linux version. If you set this
+            parameter to `Custom`, you must use the CreateInstance action's
+            AmiId parameter to specify the custom AMI that you want to use. For
+            more information on the standard operating systems, see `Operating
+            Systems`_. For more information on how to use custom AMIs with
+            OpsWorks, see `Using Custom AMIs`_.
+
+        :type ami_id: string
+        :param ami_id:
+        A custom AMI ID to be used to create the instance. The AMI should be
+            based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
+            Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
+            `Instances`_.
+
+        If you specify a custom AMI, you must set `Os` to `Custom`.
+
+        :type ssh_key_name: string
+        :param ssh_key_name: The instance SSH key name.
+
+        :type availability_zone: string
+        :param availability_zone: The instance Availability Zone. For more
+            information, see `Regions and Endpoints`_.
+
+        :type virtualization_type: string
+        :param virtualization_type: The instance's virtualization type,
+            `paravirtual` or `hvm`.
+
+        :type subnet_id: string
+        :param subnet_id: The ID of the instance's subnet. If the stack is
+            running in a VPC, you can use this parameter to override the
+            stack's default subnet ID value and direct AWS OpsWorks to launch
+            the instance in a different subnet.
+
+        :type architecture: string
+        :param architecture: The instance architecture. The default option is
+            `x86_64`. Instance types do not necessarily support both
+            architectures. For a list of the architectures that are supported
+            by the different instance types, see `Instance Families and
+            Types`_.
+
+        :type root_device_type: string
+        :param root_device_type: The instance root device type. For more
+            information, see `Storage for the Root Device`_.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+        Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+        We strongly recommend using the default value of `True` to ensure that
+            your instances have the latest security updates.
+
+        :type ebs_optimized: boolean
+        :param ebs_optimized: Whether to create an Amazon EBS-optimized
+            instance.
+
+        """
+        params = {
+            'StackId': stack_id,
+            'LayerIds': layer_ids,
+            'InstanceType': instance_type,
+        }
+        if auto_scaling_type is not None:
+            params['AutoScalingType'] = auto_scaling_type
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if os is not None:
+            params['Os'] = os
+        if ami_id is not None:
+            params['AmiId'] = ami_id
+        if ssh_key_name is not None:
+            params['SshKeyName'] = ssh_key_name
+        if availability_zone is not None:
+            params['AvailabilityZone'] = availability_zone
+        if virtualization_type is not None:
+            params['VirtualizationType'] = virtualization_type
+        if subnet_id is not None:
+            params['SubnetId'] = subnet_id
+        if architecture is not None:
+            params['Architecture'] = architecture
+        if root_device_type is not None:
+            params['RootDeviceType'] = root_device_type
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        if ebs_optimized is not None:
+            params['EbsOptimized'] = ebs_optimized
+        return self.make_request(action='CreateInstance',
+                                 body=json.dumps(params))
+
+    def create_layer(self, stack_id, type, name, shortname, attributes=None,
+                     custom_instance_profile_arn=None,
+                     custom_security_group_ids=None, packages=None,
+                     volume_configurations=None, enable_auto_healing=None,
+                     auto_assign_elastic_ips=None,
+                     auto_assign_public_ips=None, custom_recipes=None,
+                     install_updates_on_boot=None,
+                     use_ebs_optimized_instances=None,
+                     lifecycle_event_configuration=None):
+        """
+        Creates a layer. For more information, see `How to Create a
+        Layer`_.
+
+
+        You should use **CreateLayer** for noncustom layer types such
+        as PHP App Server only if the stack does not have an existing
+        layer of that type. A stack can have at most one instance of
+        each noncustom layer; if you attempt to create a second
+        instance, **CreateLayer** fails. A stack can have an arbitrary
+        number of custom layers, so you can call **CreateLayer** as
+        many times as you like for that layer type.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The layer stack ID.
+
+        :type type: string
+        :param type: The layer type. A stack cannot have more than one built-in
+            layer of the same type. It can have any number of custom layers.
+
+        :type name: string
+        :param name: The layer name, which is used by the console.
+
+        :type shortname: string
+        :param shortname: The layer short name, which is used internally by AWS
+            OpsWorks and by Chef recipes. The short name is also used as the
+            name for the directory where your app files are installed. It can
+            have a maximum of 200 characters, which are limited to the
+            alphanumeric characters, '-', '_', and '.'.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type custom_instance_profile_arn: string
+        :param custom_instance_profile_arn: The ARN of an IAM profile to be
+            used for the layer's EC2 instances. For more information about
+            IAM ARNs, see `Using Identifiers`_.
+
+        :type custom_security_group_ids: list
+        :param custom_security_group_ids: An array containing the layer custom
+            security group IDs.
+
+        :type packages: list
+        :param packages: An array of `Package` objects that describe the layer
+            packages.
+
+        :type volume_configurations: list
+        :param volume_configurations: A `VolumeConfigurations` object that
+            describes the layer's Amazon EBS volumes.
+
+        :type enable_auto_healing: boolean
+        :param enable_auto_healing: Whether to enable auto healing for the
+            layer.
+
+        :type auto_assign_elastic_ips: boolean
+        :param auto_assign_elastic_ips: Whether to automatically assign an
+            `Elastic IP address`_ to the layer's instances. For more
+            information, see `How to Edit a Layer`_.
+
+        :type auto_assign_public_ips: boolean
+        :param auto_assign_public_ips: For stacks that are running in a VPC,
+            whether to automatically assign a public IP address to the layer's
+            instances. For more information, see `How to Edit a Layer`_.
+
+        :type custom_recipes: dict
+        :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+            layer custom recipes.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+        Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+        We strongly recommend using the default value of `True` to ensure that
+            your instances have the latest security updates.
+
+        :type use_ebs_optimized_instances: boolean
+        :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
+            instances.
+
+        :type lifecycle_event_configuration: dict
+        :param lifecycle_event_configuration: A LifeCycleEventConfiguration
+            object that you can use to configure the Shutdown event to specify
+            an execution timeout and enable or disable Elastic Load Balancer
+            connection draining.
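+
+        Example (an illustrative sketch, not part of the upstream docs;
+        the stack ID below is a placeholder, and `conn` is assumed to be
+        an OpsWorksConnection)::
+
+            conn.create_layer(
+                stack_id='SOME-STACK-ID',
+                type='custom',
+                name='My Custom Layer',
+                shortname='mycustom',
+            )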
+ + """ + params = { + 'StackId': stack_id, + 'Type': type, + 'Name': name, + 'Shortname': shortname, + } + if attributes is not None: + params['Attributes'] = attributes + if custom_instance_profile_arn is not None: + params['CustomInstanceProfileArn'] = custom_instance_profile_arn + if custom_security_group_ids is not None: + params['CustomSecurityGroupIds'] = custom_security_group_ids + if packages is not None: + params['Packages'] = packages + if volume_configurations is not None: + params['VolumeConfigurations'] = volume_configurations + if enable_auto_healing is not None: + params['EnableAutoHealing'] = enable_auto_healing + if auto_assign_elastic_ips is not None: + params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips + if custom_recipes is not None: + params['CustomRecipes'] = custom_recipes + if install_updates_on_boot is not None: + params['InstallUpdatesOnBoot'] = install_updates_on_boot + if use_ebs_optimized_instances is not None: + params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration + return self.make_request(action='CreateLayer', + body=json.dumps(params)) + + def create_stack(self, name, region, service_role_arn, + default_instance_profile_arn, vpc_id=None, + attributes=None, default_os=None, hostname_theme=None, + default_availability_zone=None, default_subnet_id=None, + custom_json=None, configuration_manager=None, + chef_configuration=None, use_custom_cookbooks=None, + use_opsworks_security_groups=None, + custom_cookbooks_source=None, default_ssh_key_name=None, + default_root_device_type=None): + """ + Creates a new stack. For more information, see `Create a New + Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type name: string + :param name: The stack name. + + :type region: string + :param region: The stack AWS region, such as "us-east-1". For more + information about Amazon regions, see `Regions and Endpoints`_. + + :type vpc_id: string + :param vpc_id: The ID of the VPC that the stack is to be launched into. + It must be in the specified region. All instances are launched into + this VPC, and you cannot change the ID later. + + + If your account supports EC2 Classic, the default value is no VPC. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. + + + If the VPC ID corresponds to a default VPC and you have specified + either the `DefaultAvailabilityZone` or the `DefaultSubnetId` + parameter only, AWS OpsWorks infers the value of the other + parameter. If you specify neither parameter, AWS OpsWorks sets + these parameters to the first valid Availability Zone for the + specified region and the corresponding default VPC subnet ID, + respectively. + + If you specify a nondefault VPC ID, note the following: + + + + It must belong to a VPC in your account that is in the specified + region. + + You must specify a value for `DefaultSubnetId`. + + + For more information on how to use AWS OpsWorks with a VPC, see + `Running a Stack in a VPC`_. For more information on default VPC + and EC2 Classic, see `Supported Platforms`_. 
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type service_role_arn: string
+        :param service_role_arn: The stack AWS Identity and Access Management
+            (IAM) role, which allows AWS OpsWorks to work with AWS resources on
+            your behalf. You must set this parameter to the Amazon Resource
+            Name (ARN) for an existing IAM role. For more information about IAM
+            ARNs, see `Using Identifiers`_.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's operating system, which must be set to
+            one of the following.
+
+        + Standard operating systems: an Amazon Linux version such as `Amazon
+              Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+              when you create instances.
+
+
+        The default option is the current Amazon Linux version.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            `Layer_Dependent`, which creates host names by appending integers
+            to the layer's short name. The other themes are:
+
+        + `Baked_Goods`
+        + `Clouds`
+        + `European_Cities`
+        + `Fruits`
+        + `Greek_Deities`
+        + `Legendary_Creatures_from_Japan`
+        + `Planets_and_Moons`
+        + `Roman_Deities`
+        + `Scottish_Islands`
+        + `US_Cities`
+        + `Wild_Cats`
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+            returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The stack's default Availability
+            Zone, which must be in the specified region. For more information,
+            see `Regions and Endpoints`_. If you also specify a value for
+            `DefaultSubnetId`, the subnet must be in the same zone. For more
+            information, see the `VpcId` parameter description.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default VPC subnet ID. This
+            parameter is required if you specify a value for the `VpcId`
+            parameter. All instances are launched into this subnet unless you
+            specify otherwise when you create the instance. If you also specify
+            a value for `DefaultAvailabilityZone`, the subnet must be in that
+            zone. For information on default values and when this parameter is
+            required, see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. The string should be in the following
+            format and must escape characters such as '"'.:
+            `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+        For more information on custom JSON, see `Use Custom JSON to Modify the
+            Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you
+            create a stack we recommend that you use the configuration manager
+            to specify the Chef version, 0.9, 11.4, or 11.10. The default value
+            is currently 11.4.
+ + :type chef_configuration: dict + :param chef_configuration: A `ChefConfiguration` object that specifies + whether to enable Berkshelf and the Berkshelf version on Chef 11.10 + stacks. For more information, see `Create a New Stack`_. + + :type use_custom_cookbooks: boolean + :param use_custom_cookbooks: Whether the stack uses custom cookbooks. + + :type use_opsworks_security_groups: boolean + :param use_opsworks_security_groups: Whether to associate the AWS + OpsWorks built-in security groups with the stack's layers. + AWS OpsWorks provides a standard set of built-in security groups, one + for each layer, which are associated with layers by default. With + `UseOpsworksSecurityGroups` you can instead provide your own custom + security groups. `UseOpsworksSecurityGroups` has the following + settings: + + + + True - AWS OpsWorks automatically associates the appropriate built-in + security group with each layer (default setting). You can associate + additional security groups with a layer after you create it but you + cannot delete the built-in security group. + + False - AWS OpsWorks does not associate built-in security groups with + layers. You must create appropriate EC2 security groups and + associate a security group with each layer that you create. + However, you can still manually associate a built-in security group + with a layer on creation; custom security groups are required only + for those layers that need custom settings. + + + For more information, see `Create a New Stack`_. + + :type custom_cookbooks_source: dict + :param custom_cookbooks_source: Contains the information required to + retrieve an app or cookbook from a repository. For more + information, see `Creating Apps`_ or `Custom Recipes and + Cookbooks`_. + + :type default_ssh_key_name: string + :param default_ssh_key_name: A default SSH key for the stack instances. + You can override this value when you create or update an instance. + + :type default_root_device_type: string + :param default_root_device_type: The default root device type. This + value is used by default for all instances in the stack, but you + can override it when you create an instance. The default option is + `instance-store`. For more information, see `Storage for the Root + Device`_. 
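+
+        Example (a minimal sketch; `conn` is an instance of this
+        connection class, and the stack name and ARNs below are
+        hypothetical)::
+
+            response = conn.create_stack(
+                name='MyStack',
+                region='us-east-1',
+                service_role_arn='arn:aws:iam::123456789012:role/my-service-role',
+                default_instance_profile_arn=('arn:aws:iam::123456789012:'
+                                              'instance-profile/my-instance-profile'))
+            stack_id = response['StackId']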
+ + """ + params = { + 'Name': name, + 'Region': region, + 'ServiceRoleArn': service_role_arn, + 'DefaultInstanceProfileArn': default_instance_profile_arn, + } + if vpc_id is not None: + params['VpcId'] = vpc_id + if attributes is not None: + params['Attributes'] = attributes + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + return self.make_request(action='CreateStack', + body=json.dumps(params)) + + def create_user_profile(self, iam_user_arn, ssh_username=None, + ssh_public_key=None, allow_self_management=None): + """ + Creates a new user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + :type ssh_username: string + :param ssh_username: The user's SSH user name. The allowable characters + are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name + includes other punctuation marks, AWS OpsWorks removes them. For + example, `my.name` will be changed to `myname`. If you do not + specify an SSH user name, AWS OpsWorks generates one from the IAM + user name. + + :type ssh_public_key: string + :param ssh_public_key: The user's public SSH key. + + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + `Setting an IAM User's Public SSH Key`_. + + """ + params = {'IamUserArn': iam_user_arn, } + if ssh_username is not None: + params['SshUsername'] = ssh_username + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management + return self.make_request(action='CreateUserProfile', + body=json.dumps(params)) + + def delete_app(self, app_id): + """ + Deletes a specified app. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type app_id: string + :param app_id: The app ID. 
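+
+        Example (a minimal sketch; `conn` is an instance of this
+        connection class, and the app ID below is hypothetical)::
+
+            conn.delete_app('my-app-id')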
+ + """ + params = {'AppId': app_id, } + return self.make_request(action='DeleteApp', + body=json.dumps(params)) + + def delete_instance(self, instance_id, delete_elastic_ip=None, + delete_volumes=None): + """ + Deletes a specified instance, which terminates the associated + Amazon EC2 instance. You must stop an instance before you can + delete it. + + For more information, see `Deleting Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type delete_elastic_ip: boolean + :param delete_elastic_ip: Whether to delete the instance Elastic IP + address. + + :type delete_volumes: boolean + :param delete_volumes: Whether to delete the instance's Amazon EBS + volumes. + + """ + params = {'InstanceId': instance_id, } + if delete_elastic_ip is not None: + params['DeleteElasticIp'] = delete_elastic_ip + if delete_volumes is not None: + params['DeleteVolumes'] = delete_volumes + return self.make_request(action='DeleteInstance', + body=json.dumps(params)) + + def delete_layer(self, layer_id): + """ + Deletes a specified layer. You must first stop and then delete + all associated instances or unassign registered instances. For + more information, see `How to Delete a Layer`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type layer_id: string + :param layer_id: The layer ID. + + """ + params = {'LayerId': layer_id, } + return self.make_request(action='DeleteLayer', + body=json.dumps(params)) + + def delete_stack(self, stack_id): + """ + Deletes a specified stack. You must first delete all + instances, layers, and apps or deregister registered + instances. For more information, see `Shut Down a Stack`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DeleteStack', + body=json.dumps(params)) + + def delete_user_profile(self, iam_user_arn): + """ + Deletes a user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + """ + params = {'IamUserArn': iam_user_arn, } + return self.make_request(action='DeleteUserProfile', + body=json.dumps(params)) + + def deregister_elastic_ip(self, elastic_ip): + """ + Deregisters a specified Elastic IP address. The address can + then be registered by another stack. For more information, see + `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. 
+
+        :type elastic_ip: string
+        :param elastic_ip: The Elastic IP address.
+
+        """
+        params = {'ElasticIp': elastic_ip, }
+        return self.make_request(action='DeregisterElasticIp',
+                                 body=json.dumps(params))
+
+    def deregister_instance(self, instance_id):
+        """
+        Deregister a registered Amazon EC2 or on-premises instance.
+        This action removes the instance from the stack and returns it
+        to your control. This action cannot be used with instances
+        that were created with AWS OpsWorks.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='DeregisterInstance',
+                                 body=json.dumps(params))
+
+    def deregister_rds_db_instance(self, rds_db_instance_arn):
+        """
+        Deregisters an Amazon RDS instance.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type rds_db_instance_arn: string
+        :param rds_db_instance_arn: The Amazon RDS instance's ARN.
+
+        """
+        params = {'RdsDbInstanceArn': rds_db_instance_arn, }
+        return self.make_request(action='DeregisterRdsDbInstance',
+                                 body=json.dumps(params))
+
+    def deregister_volume(self, volume_id):
+        """
+        Deregisters an Amazon EBS volume. The volume can then be
+        registered by another stack. For more information, see
+        `Resource Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type volume_id: string
+        :param volume_id: The volume ID.
+
+        """
+        params = {'VolumeId': volume_id, }
+        return self.make_request(action='DeregisterVolume',
+                                 body=json.dumps(params))
+
+    def describe_apps(self, stack_id=None, app_ids=None):
+        """
+        Requests a description of a specified set of apps.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The app stack ID. If you use this parameter,
+            `DescribeApps` returns a description of the apps in the specified
+            stack.
+
+        :type app_ids: list
+        :param app_ids: An array of app IDs for the apps to be described. If
+            you use this parameter, `DescribeApps` returns a description of the
+            specified apps. Otherwise, it returns a description of every app.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if app_ids is not None:
+            params['AppIds'] = app_ids
+        return self.make_request(action='DescribeApps',
+                                 body=json.dumps(params))
+
+    def describe_commands(self, deployment_id=None, instance_id=None,
+                          command_ids=None):
+        """
+        Describes the results of specified commands.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type deployment_id: string
+        :param deployment_id: The deployment ID. If you include this parameter,
+            `DescribeCommands` returns a description of the commands associated
+            with the specified deployment.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you include this parameter,
+            `DescribeCommands` returns a description of the commands associated
+            with the specified instance.
+
+        :type command_ids: list
+        :param command_ids: An array of command IDs. If you include this
+            parameter, `DescribeCommands` returns a description of the
+            specified commands. Otherwise, it returns a description of every
+            command.
+
+        """
+        params = {}
+        if deployment_id is not None:
+            params['DeploymentId'] = deployment_id
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if command_ids is not None:
+            params['CommandIds'] = command_ids
+        return self.make_request(action='DescribeCommands',
+                                 body=json.dumps(params))
+
+    def describe_deployments(self, stack_id=None, app_id=None,
+                             deployment_ids=None):
+        """
+        Requests a description of a specified set of deployments.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID. If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified stack.
+
+        :type app_id: string
+        :param app_id: The app ID. If you include this parameter,
+            `DescribeDeployments` returns a description of the deployments
+            associated with the specified app.
+
+        :type deployment_ids: list
+        :param deployment_ids: An array of deployment IDs to be described. If
+            you include this parameter, `DescribeDeployments` returns a
+            description of the specified deployments. Otherwise, it returns a
+            description of every deployment.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if app_id is not None:
+            params['AppId'] = app_id
+        if deployment_ids is not None:
+            params['DeploymentIds'] = deployment_ids
+        return self.make_request(action='DescribeDeployments',
+                                 body=json.dumps(params))
+
+    def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
+        """
+        Describes `Elastic IP addresses`_.
+
+
+        You must specify at least one of the parameters.
+
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you include this parameter,
+            `DescribeElasticIps` returns a description of the Elastic IP
+            addresses associated with the specified instance.
+
+        :type stack_id: string
+        :param stack_id: A stack ID. If you include this parameter,
+            `DescribeElasticIps` returns a description of the Elastic IP
+            addresses that are registered with the specified stack.
+ + :type ips: list + :param ips: An array of Elastic IP addresses to be described. If you + include this parameter, `DescribeElasticIps` returns a description + of the specified Elastic IP addresses. Otherwise, it returns a + description of every Elastic IP address. + + """ + params = {} + if instance_id is not None: + params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id + if ips is not None: + params['Ips'] = ips + return self.make_request(action='DescribeElasticIps', + body=json.dumps(params)) + + def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None): + """ + Describes a stack's Elastic Load Balancing instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: A stack ID. The action describes the stack's Elastic + Load Balancing instances. + + :type layer_ids: list + :param layer_ids: A list of layer IDs. The action describes the Elastic + Load Balancing instances for the specified layers. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_ids is not None: + params['LayerIds'] = layer_ids + return self.make_request(action='DescribeElasticLoadBalancers', + body=json.dumps(params)) + + def describe_instances(self, stack_id=None, layer_id=None, + instance_ids=None): + """ + Requests a description of a set of instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: A stack ID. If you use this parameter, + `DescribeInstances` returns descriptions of the instances + associated with the specified stack. + + :type layer_id: string + :param layer_id: A layer ID. If you use this parameter, + `DescribeInstances` returns descriptions of the instances + associated with the specified layer. + + :type instance_ids: list + :param instance_ids: An array of instance IDs to be described. If you + use this parameter, `DescribeInstances` returns a description of + the specified instances. Otherwise, it returns a description of + every instance. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_id is not None: + params['LayerId'] = layer_id + if instance_ids is not None: + params['InstanceIds'] = instance_ids + return self.make_request(action='DescribeInstances', + body=json.dumps(params)) + + def describe_layers(self, stack_id=None, layer_ids=None): + """ + Requests a description of one or more layers in a specified + stack. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type layer_ids: list + :param layer_ids: An array of layer IDs that specify the layers to be + described. 
If you omit this parameter, `DescribeLayers` returns a + description of every layer in the specified stack. + + """ + params = {} + if stack_id is not None: + params['StackId'] = stack_id + if layer_ids is not None: + params['LayerIds'] = layer_ids + return self.make_request(action='DescribeLayers', + body=json.dumps(params)) + + def describe_load_based_auto_scaling(self, layer_ids): + """ + Describes load-based auto scaling configurations for specified + layers. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type layer_ids: list + :param layer_ids: An array of layer IDs. + + """ + params = {'LayerIds': layer_ids, } + return self.make_request(action='DescribeLoadBasedAutoScaling', + body=json.dumps(params)) + + def describe_my_user_profile(self): + """ + Describes a user's SSH information. + + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + + """ + params = {} + return self.make_request(action='DescribeMyUserProfile', + body=json.dumps(params)) + + def describe_permissions(self, iam_user_arn=None, stack_id=None): + """ + Describes the permissions for a specified stack. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. For more information about IAM + ARNs, see `Using Identifiers`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {} + if iam_user_arn is not None: + params['IamUserArn'] = iam_user_arn + if stack_id is not None: + params['StackId'] = stack_id + return self.make_request(action='DescribePermissions', + body=json.dumps(params)) + + def describe_raid_arrays(self, instance_id=None, stack_id=None, + raid_array_ids=None): + """ + Describe an instance's RAID arrays. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. If you use this parameter, + `DescribeRaidArrays` returns descriptions of the RAID arrays + associated with the specified instance. + + :type stack_id: string + :param stack_id: The stack ID. + + :type raid_array_ids: list + :param raid_array_ids: An array of RAID array IDs. If you use this + parameter, `DescribeRaidArrays` returns descriptions of the + specified arrays. Otherwise, it returns a description of every + array. 
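+
+        Example (a minimal sketch; `conn` is an instance of this
+        connection class, the instance ID is hypothetical, and the
+        `RaidArrays` key follows the documented response shape)::
+
+            response = conn.describe_raid_arrays(instance_id='my-instance-id')
+            for raid_array in response.get('RaidArrays', []):
+                print(raid_array['RaidArrayId'])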
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if raid_array_ids is not None:
+            params['RaidArrayIds'] = raid_array_ids
+        return self.make_request(action='DescribeRaidArrays',
+                                 body=json.dumps(params))
+
+    def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None):
+        """
+        Describes Amazon RDS instances.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID that the instances are registered with.
+            The operation returns descriptions of all registered Amazon RDS
+            instances.
+
+        :type rds_db_instance_arns: list
+        :param rds_db_instance_arns: An array containing the ARNs of the
+            instances to be described.
+
+        """
+        params = {'StackId': stack_id, }
+        if rds_db_instance_arns is not None:
+            params['RdsDbInstanceArns'] = rds_db_instance_arns
+        return self.make_request(action='DescribeRdsDbInstances',
+                                 body=json.dumps(params))
+
+    def describe_service_errors(self, stack_id=None, instance_id=None,
+                                service_error_ids=None):
+        """
+        Describes AWS OpsWorks service errors.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the
+        stack, or an attached policy that explicitly grants
+        permissions. For more information on user permissions, see
+        `Managing User Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID. If you use this parameter,
+            `DescribeServiceErrors` returns descriptions of the errors
+            associated with the specified stack.
+
+        :type instance_id: string
+        :param instance_id: The instance ID. If you use this parameter,
+            `DescribeServiceErrors` returns descriptions of the errors
+            associated with the specified instance.
+
+        :type service_error_ids: list
+        :param service_error_ids: An array of service error IDs. If you use
+            this parameter, `DescribeServiceErrors` returns descriptions of the
+            specified errors. Otherwise, it returns a description of every
+            error.
+
+        """
+        params = {}
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if service_error_ids is not None:
+            params['ServiceErrorIds'] = service_error_ids
+        return self.make_request(action='DescribeServiceErrors',
+                                 body=json.dumps(params))
+
+    def describe_stack_provisioning_parameters(self, stack_id):
+        """
+        Requests a description of a stack's provisioning parameters.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Show, Deploy, or Manage permissions level for the stack
+        or an attached policy that explicitly grants permissions. For
+        more information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'StackId': stack_id, }
+        return self.make_request(action='DescribeStackProvisioningParameters',
+                                 body=json.dumps(params))
+
+    def describe_stack_summary(self, stack_id):
+        """
+        Describes the number of layers and apps in a specified stack,
+        and the number of instances in each state, such as
+        `running_setup` or `online`.
+ + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='DescribeStackSummary', + body=json.dumps(params)) + + def describe_stacks(self, stack_ids=None): + """ + Requests a description of one or more stacks. + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type stack_ids: list + :param stack_ids: An array of stack IDs that specify the stacks to be + described. If you omit this parameter, `DescribeStacks` returns a + description of every stack. + + """ + params = {} + if stack_ids is not None: + params['StackIds'] = stack_ids + return self.make_request(action='DescribeStacks', + body=json.dumps(params)) + + def describe_time_based_auto_scaling(self, instance_ids): + """ + Describes time-based auto scaling configurations for specified + instances. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_ids: list + :param instance_ids: An array of instance IDs. + + """ + params = {'InstanceIds': instance_ids, } + return self.make_request(action='DescribeTimeBasedAutoScaling', + body=json.dumps(params)) + + def describe_user_profiles(self, iam_user_arns=None): + """ + Describe specified users. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arns: list + :param iam_user_arns: An array of IAM user ARNs that identify the users + to be described. + + """ + params = {} + if iam_user_arns is not None: + params['IamUserArns'] = iam_user_arns + return self.make_request(action='DescribeUserProfiles', + body=json.dumps(params)) + + def describe_volumes(self, instance_id=None, stack_id=None, + raid_array_id=None, volume_ids=None): + """ + Describes an instance's Amazon EBS volumes. + + + You must specify at least one of the parameters. + + + **Required Permissions**: To use this action, an IAM user must + have a Show, Deploy, or Manage permissions level for the + stack, or an attached policy that explicitly grants + permissions. For more information on user permissions, see + `Managing User Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. If you use this parameter, + `DescribeVolumes` returns descriptions of the volumes associated + with the specified instance. + + :type stack_id: string + :param stack_id: A stack ID. The action describes the stack's + registered Amazon EBS volumes. + + :type raid_array_id: string + :param raid_array_id: The RAID array ID. If you use this parameter, + `DescribeVolumes` returns descriptions of the volumes associated + with the specified RAID array. 
+
+        :type volume_ids: list
+        :param volume_ids: An array of volume IDs. If you use this parameter,
+            `DescribeVolumes` returns descriptions of the specified volumes.
+            Otherwise, it returns a description of every volume.
+
+        """
+        params = {}
+        if instance_id is not None:
+            params['InstanceId'] = instance_id
+        if stack_id is not None:
+            params['StackId'] = stack_id
+        if raid_array_id is not None:
+            params['RaidArrayId'] = raid_array_id
+        if volume_ids is not None:
+            params['VolumeIds'] = volume_ids
+        return self.make_request(action='DescribeVolumes',
+                                 body=json.dumps(params))
+
+    def detach_elastic_load_balancer(self, elastic_load_balancer_name,
+                                     layer_id):
+        """
+        Detaches a specified Elastic Load Balancing instance from its
+        layer.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_load_balancer_name: string
+        :param elastic_load_balancer_name: The Elastic Load Balancing
+            instance's name.
+
+        :type layer_id: string
+        :param layer_id: The ID of the layer that the Elastic Load Balancing
+            instance is attached to.
+
+        """
+        params = {
+            'ElasticLoadBalancerName': elastic_load_balancer_name,
+            'LayerId': layer_id,
+        }
+        return self.make_request(action='DetachElasticLoadBalancer',
+                                 body=json.dumps(params))
+
+    def disassociate_elastic_ip(self, elastic_ip):
+        """
+        Disassociates an Elastic IP address from its instance. The
+        address remains registered with the stack. For more
+        information, see `Resource Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_ip: string
+        :param elastic_ip: The Elastic IP address.
+
+        """
+        params = {'ElasticIp': elastic_ip, }
+        return self.make_request(action='DisassociateElasticIp',
+                                 body=json.dumps(params))
+
+    def get_hostname_suggestion(self, layer_id):
+        """
+        Gets a generated host name for the specified layer, based on
+        the current host name theme.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
+
+        """
+        params = {'LayerId': layer_id, }
+        return self.make_request(action='GetHostnameSuggestion',
+                                 body=json.dumps(params))
+
+    def reboot_instance(self, instance_id):
+        """
+        Reboots a specified instance. For more information, see
+        `Starting, Stopping, and Rebooting Instances`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='RebootInstance',
+                                 body=json.dumps(params))
+
+    def register_elastic_ip(self, elastic_ip, stack_id):
+        """
+        Registers an Elastic IP address with a specified stack. An
+        address can be registered with only one stack at a time.
If
+        the address is already registered, you must first deregister
+        it by calling DeregisterElasticIp. For more information, see
+        `Resource Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type elastic_ip: string
+        :param elastic_ip: The Elastic IP address.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
+        return self.make_request(action='RegisterElasticIp',
+                                 body=json.dumps(params))
+
+    def register_instance(self, stack_id, hostname=None, public_ip=None,
+                          private_ip=None, rsa_public_key=None,
+                          rsa_public_key_fingerprint=None,
+                          instance_identity=None):
+        """
+        Registers instances with a specified stack that were created
+        outside of AWS OpsWorks.
+
+        We do not recommend using this action to register instances.
+        The complete registration operation has two primary steps,
+        installing the AWS OpsWorks agent on the instance and
+        registering the instance with the stack. `RegisterInstance`
+        handles only the second step. You should instead use the AWS
+        CLI `register` command, which performs the entire registration
+        operation.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The ID of the stack that the instance is to be
+            registered with.
+
+        :type hostname: string
+        :param hostname: The instance's hostname.
+
+        :type public_ip: string
+        :param public_ip: The instance's public IP address.
+
+        :type private_ip: string
+        :param private_ip: The instance's private IP address.
+
+        :type rsa_public_key: string
+        :param rsa_public_key: The instance's public RSA key. This key is used
+            to encrypt communication between the instance and the service.
+
+        :type rsa_public_key_fingerprint: string
+        :param rsa_public_key_fingerprint: The instance's public RSA key
+            fingerprint.
+
+        :type instance_identity: dict
+        :param instance_identity: An InstanceIdentity object that contains the
+            instance's identity.
+
+        """
+        params = {'StackId': stack_id, }
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if public_ip is not None:
+            params['PublicIp'] = public_ip
+        if private_ip is not None:
+            params['PrivateIp'] = private_ip
+        if rsa_public_key is not None:
+            params['RsaPublicKey'] = rsa_public_key
+        if rsa_public_key_fingerprint is not None:
+            params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
+        if instance_identity is not None:
+            params['InstanceIdentity'] = instance_identity
+        return self.make_request(action='RegisterInstance',
+                                 body=json.dumps(params))
+
+    def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
+                                 db_user, db_password):
+        """
+        Registers an Amazon RDS instance with a stack.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type rds_db_instance_arn: string
+        :param rds_db_instance_arn: The Amazon RDS instance's ARN.
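+            As a sketch of the ARN format (the account ID and instance
+            name are hypothetical):
+            `arn:aws:rds:us-east-1:123456789012:db:mydbinstance`.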
+ + :type db_user: string + :param db_user: The database's master user name. + + :type db_password: string + :param db_password: The database password. + + """ + params = { + 'StackId': stack_id, + 'RdsDbInstanceArn': rds_db_instance_arn, + 'DbUser': db_user, + 'DbPassword': db_password, + } + return self.make_request(action='RegisterRdsDbInstance', + body=json.dumps(params)) + + def register_volume(self, stack_id, ec_2_volume_id=None): + """ + Registers an Amazon EBS volume with a specified stack. A + volume can be registered with only one stack at a time. If the + volume is already registered, you must first deregister it by + calling DeregisterVolume. For more information, see `Resource + Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type ec_2_volume_id: string + :param ec_2_volume_id: The Amazon EBS volume ID. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + if ec_2_volume_id is not None: + params['Ec2VolumeId'] = ec_2_volume_id + return self.make_request(action='RegisterVolume', + body=json.dumps(params)) + + def set_load_based_auto_scaling(self, layer_id, enable=None, + up_scaling=None, down_scaling=None): + """ + Specify the load-based auto scaling configuration for a + specified layer. For more information, see `Managing Load with + Time-based and Load-based Instances`_. + + + To use load-based auto scaling, you must create a set of load- + based auto scaling instances. Load-based auto scaling operates + only on the instances from that set, so you must ensure that + you have created enough instances to handle the maximum + anticipated load. + + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type layer_id: string + :param layer_id: The layer ID. + + :type enable: boolean + :param enable: Enables load-based auto scaling for the layer. + + :type up_scaling: dict + :param up_scaling: An `AutoScalingThresholds` object with the upscaling + threshold configuration. If the load exceeds these thresholds for a + specified amount of time, AWS OpsWorks starts a specified number of + instances. + + :type down_scaling: dict + :param down_scaling: An `AutoScalingThresholds` object with the + downscaling threshold configuration. If the load falls below these + thresholds for a specified amount of time, AWS OpsWorks stops a + specified number of instances. + + """ + params = {'LayerId': layer_id, } + if enable is not None: + params['Enable'] = enable + if up_scaling is not None: + params['UpScaling'] = up_scaling + if down_scaling is not None: + params['DownScaling'] = down_scaling + return self.make_request(action='SetLoadBasedAutoScaling', + body=json.dumps(params)) + + def set_permission(self, stack_id, iam_user_arn, allow_ssh=None, + allow_sudo=None, level=None): + """ + Specifies a user's permissions. For more information, see + `Security and Permissions`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. 
For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + :type iam_user_arn: string + :param iam_user_arn: The user's IAM ARN. + + :type allow_ssh: boolean + :param allow_ssh: The user is allowed to use SSH to communicate with + the instance. + + :type allow_sudo: boolean + :param allow_sudo: The user is allowed to use **sudo** to elevate + privileges. + + :type level: string + :param level: The user's permission level, which must be set to one of + the following strings. You cannot set your own permissions level. + + + `deny` + + `show` + + `deploy` + + `manage` + + `iam_only` + + + For more information on the permissions associated with these levels, + see `Managing User Permissions`_ + + """ + params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, } + if allow_ssh is not None: + params['AllowSsh'] = allow_ssh + if allow_sudo is not None: + params['AllowSudo'] = allow_sudo + if level is not None: + params['Level'] = level + return self.make_request(action='SetPermission', + body=json.dumps(params)) + + def set_time_based_auto_scaling(self, instance_id, + auto_scaling_schedule=None): + """ + Specify the time-based auto scaling configuration for a + specified instance. For more information, see `Managing Load + with Time-based and Load-based Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type auto_scaling_schedule: dict + :param auto_scaling_schedule: An `AutoScalingSchedule` with the + instance schedule. + + """ + params = {'InstanceId': instance_id, } + if auto_scaling_schedule is not None: + params['AutoScalingSchedule'] = auto_scaling_schedule + return self.make_request(action='SetTimeBasedAutoScaling', + body=json.dumps(params)) + + def start_instance(self, instance_id): + """ + Starts a specified instance. For more information, see + `Starting, Stopping, and Rebooting Instances`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'InstanceId': instance_id, } + return self.make_request(action='StartInstance', + body=json.dumps(params)) + + def start_stack(self, stack_id): + """ + Starts a stack's instances. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + return self.make_request(action='StartStack', + body=json.dumps(params)) + + def stop_instance(self, instance_id): + """ + Stops a specified instance. When you stop a standard instance, + the data disappears and must be reinstalled when you restart + the instance. You can stop an Amazon EBS-backed instance + without losing data. For more information, see `Starting, + Stopping, and Rebooting Instances`_. 
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='StopInstance',
+                                 body=json.dumps(params))
+
+    def stop_stack(self, stack_id):
+        """
+        Stops a specified stack.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        """
+        params = {'StackId': stack_id, }
+        return self.make_request(action='StopStack',
+                                 body=json.dumps(params))
+
+    def unassign_instance(self, instance_id):
+        """
+        Unassigns a registered instance from all of its layers. The
+        instance remains in the stack as an unassigned instance and
+        can be assigned to another layer, as needed. You cannot use
+        this action with instances that were created with AWS
+        OpsWorks.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type instance_id: string
+        :param instance_id: The instance ID.
+
+        """
+        params = {'InstanceId': instance_id, }
+        return self.make_request(action='UnassignInstance',
+                                 body=json.dumps(params))
+
+    def unassign_volume(self, volume_id):
+        """
+        Unassigns an assigned Amazon EBS volume. The volume remains
+        registered with the stack. For more information, see `Resource
+        Management`_.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type volume_id: string
+        :param volume_id: The volume ID.
+
+        """
+        params = {'VolumeId': volume_id, }
+        return self.make_request(action='UnassignVolume',
+                                 body=json.dumps(params))
+
+    def update_app(self, app_id, name=None, description=None,
+                   data_sources=None, type=None, app_source=None,
+                   domains=None, enable_ssl=None, ssl_configuration=None,
+                   attributes=None, environment=None):
+        """
+        Updates a specified app.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Deploy or Manage permissions level for the stack, or an
+        attached policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type app_id: string
+        :param app_id: The app ID.
+
+        :type name: string
+        :param name: The app name.
+
+        :type description: string
+        :param description: A description of the app.
+
+        :type data_sources: list
+        :param data_sources: The app's data sources.
+
+        :type type: string
+        :param type: The app type.
+
+        :type app_source: dict
+        :param app_source: A `Source` object that specifies the app repository.
+
+        :type domains: list
+        :param domains: The app's virtual host settings, with multiple domains
+            separated by commas. For example: `'www.example.com, example.com'`
+
+        :type enable_ssl: boolean
+        :param enable_ssl: Whether SSL is enabled for the app.
+ + :type ssl_configuration: dict + :param ssl_configuration: An `SslConfiguration` object with the SSL + configuration. + + :type attributes: map + :param attributes: One or more user-defined key/value pairs to be added + to the stack attributes. + + :type environment: list + :param environment: + An array of `EnvironmentVariable` objects that specify environment + variables to be associated with the app. You can specify up to ten + environment variables. After you deploy the app, these variables + are defined on the associated app server instances. + + This parameter is supported only by Chef 11.10 stacks. If you have + specified one or more environment variables, you cannot modify the + stack's Chef version. + + """ + params = {'AppId': app_id, } + if name is not None: + params['Name'] = name + if description is not None: + params['Description'] = description + if data_sources is not None: + params['DataSources'] = data_sources + if type is not None: + params['Type'] = type + if app_source is not None: + params['AppSource'] = app_source + if domains is not None: + params['Domains'] = domains + if enable_ssl is not None: + params['EnableSsl'] = enable_ssl + if ssl_configuration is not None: + params['SslConfiguration'] = ssl_configuration + if attributes is not None: + params['Attributes'] = attributes + if environment is not None: + params['Environment'] = environment + return self.make_request(action='UpdateApp', + body=json.dumps(params)) + + def update_elastic_ip(self, elastic_ip, name=None): + """ + Updates a registered Elastic IP address's name. For more + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type elastic_ip: string + :param elastic_ip: The address. + + :type name: string + :param name: The new name. + + """ + params = {'ElasticIp': elastic_ip, } + if name is not None: + params['Name'] = name + return self.make_request(action='UpdateElasticIp', + body=json.dumps(params)) + + def update_instance(self, instance_id, layer_ids=None, + instance_type=None, auto_scaling_type=None, + hostname=None, os=None, ami_id=None, + ssh_key_name=None, architecture=None, + install_updates_on_boot=None, ebs_optimized=None): + """ + Updates a specified instance. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type instance_id: string + :param instance_id: The instance ID. + + :type layer_ids: list + :param layer_ids: The instance's layer IDs. + + :type instance_type: string + :param instance_type: The instance type. AWS OpsWorks supports all + instance types except Cluster Compute, Cluster GPU, and High Memory + Cluster. For more information, see `Instance Families and Types`_. + The parameter values that you use to specify the various types are + in the API Name column of the Available Instance Types table. + + :type auto_scaling_type: string + :param auto_scaling_type: For load-based or time-based instances, the + type. + + :type hostname: string + :param hostname: The instance host name. + + :type os: string + :param os: The instance's operating system, which must be set to one of + the following. 
+
+        + Standard operating systems: An Amazon Linux version such as `Amazon
+          Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`
+
+
+        The default option is the current Amazon Linux version, such as `Amazon
+        Linux 2014.09`. If you set this parameter to `Custom`, you must use
+        the CreateInstance action's AmiId parameter to specify the custom
+        AMI that you want to use. For more information on the standard
+        operating systems, see `Operating Systems`_. For more information on
+        how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
+
+        :type ami_id: string
+        :param ami_id:
+        A custom AMI ID to be used to create the instance. The AMI should be
+            based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
+            Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
+            `Instances`_.
+
+        If you specify a custom AMI, you must set `Os` to `Custom`.
+
+        :type ssh_key_name: string
+        :param ssh_key_name: The instance SSH key name.
+
+        :type architecture: string
+        :param architecture: The instance architecture. Instance types do not
+            necessarily support both architectures. For a list of the
+            architectures that are supported by the different instance types,
+            see `Instance Families and Types`_.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+        Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
+
+        :type ebs_optimized: boolean
+        :param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
+
+        """
+        params = {'InstanceId': instance_id, }
+        if layer_ids is not None:
+            params['LayerIds'] = layer_ids
+        if instance_type is not None:
+            params['InstanceType'] = instance_type
+        if auto_scaling_type is not None:
+            params['AutoScalingType'] = auto_scaling_type
+        if hostname is not None:
+            params['Hostname'] = hostname
+        if os is not None:
+            params['Os'] = os
+        if ami_id is not None:
+            params['AmiId'] = ami_id
+        if ssh_key_name is not None:
+            params['SshKeyName'] = ssh_key_name
+        if architecture is not None:
+            params['Architecture'] = architecture
+        if install_updates_on_boot is not None:
+            params['InstallUpdatesOnBoot'] = install_updates_on_boot
+        if ebs_optimized is not None:
+            params['EbsOptimized'] = ebs_optimized
+        return self.make_request(action='UpdateInstance',
+                                 body=json.dumps(params))
+
+    def update_layer(self, layer_id, name=None, shortname=None,
+                     attributes=None, custom_instance_profile_arn=None,
+                     custom_security_group_ids=None, packages=None,
+                     volume_configurations=None, enable_auto_healing=None,
+                     auto_assign_elastic_ips=None,
+                     auto_assign_public_ips=None, custom_recipes=None,
+                     install_updates_on_boot=None,
+                     use_ebs_optimized_instances=None,
+                     lifecycle_event_configuration=None):
+        """
+        Updates a specified layer.
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type layer_id: string
+        :param layer_id: The layer ID.
+
+        :type name: string
+        :param name: The layer name, which is used by the console.
+
+        :type shortname: string
+        :param shortname: The layer short name, which is used internally by AWS
+            OpsWorks and by Chef. The short name is also used as the name for
+            the directory where your app files are installed. It can have a
+            maximum of 200 characters and must be in the following format:
+            /\A[a-z0-9\-\_\.]+\Z/.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type custom_instance_profile_arn: string
+        :param custom_instance_profile_arn: The ARN of an IAM profile to be
+            used for all of the layer's EC2 instances. For more information
+            about IAM ARNs, see `Using Identifiers`_.
+
+        :type custom_security_group_ids: list
+        :param custom_security_group_ids: An array containing the layer's
+            custom security group IDs.
+
+        :type packages: list
+        :param packages: An array of `Package` objects that describe the
+            layer's packages.
+
+        :type volume_configurations: list
+        :param volume_configurations: A `VolumeConfigurations` object that
+            describes the layer's Amazon EBS volumes.
+
+        :type enable_auto_healing: boolean
+        :param enable_auto_healing: Whether to enable auto healing for the
+            layer.
+
+        :type auto_assign_elastic_ips: boolean
+        :param auto_assign_elastic_ips: Whether to automatically assign an
+            `Elastic IP address`_ to the layer's instances. For more
+            information, see `How to Edit a Layer`_.
+
+        :type auto_assign_public_ips: boolean
+        :param auto_assign_public_ips: For stacks that are running in a VPC,
+            whether to automatically assign a public IP address to the layer's
+            instances. For more information, see `How to Edit a Layer`_.
+
+        :type custom_recipes: dict
+        :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+            layer's custom recipes.
+
+        :type install_updates_on_boot: boolean
+        :param install_updates_on_boot:
+        Whether to install operating system and package updates when the
+            instance boots. The default value is `True`. To control when
+            updates are installed, set this value to `False`. You must then
+            update your instances manually by using CreateDeployment to run the
+            `update_dependencies` stack command or manually running `yum`
+            (Amazon Linux) or `apt-get` (Ubuntu) on the instances.
+
+
+        We strongly recommend using the default value of `True`, to ensure that
+        your instances have the latest security updates.
+
+        :type use_ebs_optimized_instances: boolean
+        :param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
+            instances.
+ + :type lifecycle_event_configuration: dict + :param lifecycle_event_configuration: + + """ + params = {'LayerId': layer_id, } + if name is not None: + params['Name'] = name + if shortname is not None: + params['Shortname'] = shortname + if attributes is not None: + params['Attributes'] = attributes + if custom_instance_profile_arn is not None: + params['CustomInstanceProfileArn'] = custom_instance_profile_arn + if custom_security_group_ids is not None: + params['CustomSecurityGroupIds'] = custom_security_group_ids + if packages is not None: + params['Packages'] = packages + if volume_configurations is not None: + params['VolumeConfigurations'] = volume_configurations + if enable_auto_healing is not None: + params['EnableAutoHealing'] = enable_auto_healing + if auto_assign_elastic_ips is not None: + params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips + if custom_recipes is not None: + params['CustomRecipes'] = custom_recipes + if install_updates_on_boot is not None: + params['InstallUpdatesOnBoot'] = install_updates_on_boot + if use_ebs_optimized_instances is not None: + params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances + if lifecycle_event_configuration is not None: + params['LifecycleEventConfiguration'] = lifecycle_event_configuration + return self.make_request(action='UpdateLayer', + body=json.dumps(params)) + + def update_my_user_profile(self, ssh_public_key=None): + """ + Updates a user's SSH public key. + + **Required Permissions**: To use this action, an IAM user must + have self-management enabled or an attached policy that + explicitly grants permissions. For more information on user + permissions, see `Managing User Permissions`_. + + :type ssh_public_key: string + :param ssh_public_key: The user's SSH public key. + + """ + params = {} + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + return self.make_request(action='UpdateMyUserProfile', + body=json.dumps(params)) + + def update_rds_db_instance(self, rds_db_instance_arn, db_user=None, + db_password=None): + """ + Updates an Amazon RDS instance. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type rds_db_instance_arn: string + :param rds_db_instance_arn: The Amazon RDS instance's ARN. + + :type db_user: string + :param db_user: The master user name. + + :type db_password: string + :param db_password: The database password. + + """ + params = {'RdsDbInstanceArn': rds_db_instance_arn, } + if db_user is not None: + params['DbUser'] = db_user + if db_password is not None: + params['DbPassword'] = db_password + return self.make_request(action='UpdateRdsDbInstance', + body=json.dumps(params)) + + def update_stack(self, stack_id, name=None, attributes=None, + service_role_arn=None, + default_instance_profile_arn=None, default_os=None, + hostname_theme=None, default_availability_zone=None, + default_subnet_id=None, custom_json=None, + configuration_manager=None, chef_configuration=None, + use_custom_cookbooks=None, custom_cookbooks_source=None, + default_ssh_key_name=None, + default_root_device_type=None, + use_opsworks_security_groups=None): + """ + Updates a specified stack. 
+
+        **Required Permissions**: To use this action, an IAM user must
+        have a Manage permissions level for the stack, or an attached
+        policy that explicitly grants permissions. For more
+        information on user permissions, see `Managing User
+        Permissions`_.
+
+        :type stack_id: string
+        :param stack_id: The stack ID.
+
+        :type name: string
+        :param name: The stack's new name.
+
+        :type attributes: map
+        :param attributes: One or more user-defined key/value pairs to be added
+            to the stack attributes.
+
+        :type service_role_arn: string
+        :param service_role_arn:
+            The stack AWS Identity and Access Management (IAM) role, which allows
+            AWS OpsWorks to work with AWS resources on your behalf. You must
+            set this parameter to the Amazon Resource Name (ARN) for an
+            existing IAM role. For more information about IAM ARNs, see `Using
+            Identifiers`_.
+
+
+        You must set this parameter to a valid service role ARN or the action
+        will fail; there is no default value. You can specify the stack's
+        current service role ARN, if you prefer, but you must do so
+        explicitly.
+
+        :type default_instance_profile_arn: string
+        :param default_instance_profile_arn: The ARN of an IAM profile that is
+            the default profile for all of the stack's EC2 instances. For more
+            information about IAM ARNs, see `Using Identifiers`_.
+
+        :type default_os: string
+        :param default_os: The stack's operating system, which must be set to
+            one of the following.
+
+
+        + Standard operating systems: an Amazon Linux version such as `Amazon
+          Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+        + Custom AMIs: `Custom`. You specify the custom AMI you want to use
+          when you create instances.
+
+
+        The default option is the current Amazon Linux version.
+
+        :type hostname_theme: string
+        :param hostname_theme: The stack's new host name theme, with spaces
+            replaced by underscores. The theme is used to generate host names
+            for the stack's instances. By default, `HostnameTheme` is set to
+            `Layer_Dependent`, which creates host names by appending integers
+            to the layer's short name. The other themes are:
+
+
+        + `Baked_Goods`
+        + `Clouds`
+        + `European_Cities`
+        + `Fruits`
+        + `Greek_Deities`
+        + `Legendary_Creatures_from_Japan`
+        + `Planets_and_Moons`
+        + `Roman_Deities`
+        + `Scottish_Islands`
+        + `US_Cities`
+        + `Wild_Cats`
+
+
+        To obtain a generated host name, call `GetHostNameSuggestion`, which
+        returns a host name based on the current theme.
+
+        :type default_availability_zone: string
+        :param default_availability_zone: The stack's default Availability
+            Zone, which must be in the specified region. For more information,
+            see `Regions and Endpoints`_. If you also specify a value for
+            `DefaultSubnetId`, the subnet must be in the same zone. For more
+            information, see CreateStack.
+
+        :type default_subnet_id: string
+        :param default_subnet_id: The stack's default VPC subnet ID. This
+            parameter is required if you specify a value for the `VpcId`
+            parameter. All instances are launched into this subnet unless you
+            specify otherwise when you create the instance. If you also specify
+            a value for `DefaultAvailabilityZone`, the subnet must be in that
+            zone. For information on default values and when this parameter is
+            required, see the `VpcId` parameter description.
+
+        :type custom_json: string
+        :param custom_json: A string that contains user-defined, custom JSON.
+            It is used to override the corresponding default stack
+            configuration JSON values. 
The string should be in the following
+            format and must escape characters such as '"':
+            `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+        For more information on custom JSON, see `Use Custom JSON to Modify the
+        Stack Configuration JSON`_.
+
+        :type configuration_manager: dict
+        :param configuration_manager: The configuration manager. When you
+            update a stack, we recommend that you use the configuration
+            manager to specify the Chef version, 0.9, 11.4, or 11.10. The
+            default value is currently 11.4.
+
+        :type chef_configuration: dict
+        :param chef_configuration: A `ChefConfiguration` object that specifies
+            whether to enable Berkshelf and the Berkshelf version on Chef 11.10
+            stacks. For more information, see `Create a New Stack`_.
+
+        :type use_custom_cookbooks: boolean
+        :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+        :type custom_cookbooks_source: dict
+        :param custom_cookbooks_source: Contains the information required to
+            retrieve an app or cookbook from a repository. For more
+            information, see `Creating Apps`_ or `Custom Recipes and
+            Cookbooks`_.
+
+        :type default_ssh_key_name: string
+        :param default_ssh_key_name: A default SSH key for the stack instances.
+            You can override this value when you create or update an instance.
+
+        :type default_root_device_type: string
+        :param default_root_device_type: The default root device type. This
+            value is used by default for all instances in the stack, but you
+            can override it when you create an instance. For more information,
+            see `Storage for the Root Device`_.
+
+        :type use_opsworks_security_groups: boolean
+        :param use_opsworks_security_groups: Whether to associate the AWS
+            OpsWorks built-in security groups with the stack's layers.
+            AWS OpsWorks provides a standard set of built-in security groups, one
+            for each layer, which are associated with layers by default.
+            `UseOpsworksSecurityGroups` allows you to instead provide your own
+            custom security groups. `UseOpsworksSecurityGroups` has the
+            following settings:
+
+
+        + True - AWS OpsWorks automatically associates the appropriate built-in
+          security group with each layer (default setting). You can associate
+          additional security groups with a layer after you create it but you
+          cannot delete the built-in security group.
+        + False - AWS OpsWorks does not associate built-in security groups with
+          layers. You must create appropriate EC2 security groups and
+          associate a security group with each layer that you create.
+          However, you can still manually associate a built-in security group
+          with a layer on creation; custom security groups are required only
+          for those layers that need custom settings.
+
+
+        For more information, see `Create a New Stack`_.
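+
+        Example (illustrative only; ``conn`` is assumed to be an
+        already-constructed instance of this connection class, and
+        ``'stack-id'`` is a placeholder)::
+
+            conn.update_stack('stack-id', name='MyStack',
+                              hostname_theme='Planets_and_Moons')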
+ + """ + params = {'StackId': stack_id, } + if name is not None: + params['Name'] = name + if attributes is not None: + params['Attributes'] = attributes + if service_role_arn is not None: + params['ServiceRoleArn'] = service_role_arn + if default_instance_profile_arn is not None: + params['DefaultInstanceProfileArn'] = default_instance_profile_arn + if default_os is not None: + params['DefaultOs'] = default_os + if hostname_theme is not None: + params['HostnameTheme'] = hostname_theme + if default_availability_zone is not None: + params['DefaultAvailabilityZone'] = default_availability_zone + if default_subnet_id is not None: + params['DefaultSubnetId'] = default_subnet_id + if custom_json is not None: + params['CustomJson'] = custom_json + if configuration_manager is not None: + params['ConfigurationManager'] = configuration_manager + if chef_configuration is not None: + params['ChefConfiguration'] = chef_configuration + if use_custom_cookbooks is not None: + params['UseCustomCookbooks'] = use_custom_cookbooks + if custom_cookbooks_source is not None: + params['CustomCookbooksSource'] = custom_cookbooks_source + if default_ssh_key_name is not None: + params['DefaultSshKeyName'] = default_ssh_key_name + if default_root_device_type is not None: + params['DefaultRootDeviceType'] = default_root_device_type + if use_opsworks_security_groups is not None: + params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups + return self.make_request(action='UpdateStack', + body=json.dumps(params)) + + def update_user_profile(self, iam_user_arn, ssh_username=None, + ssh_public_key=None, allow_self_management=None): + """ + Updates a specified user profile. + + **Required Permissions**: To use this action, an IAM user must + have an attached policy that explicitly grants permissions. + For more information on user permissions, see `Managing User + Permissions`_. + + :type iam_user_arn: string + :param iam_user_arn: The user IAM ARN. + + :type ssh_username: string + :param ssh_username: The user's SSH user name. The allowable characters + are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name + includes other punctuation marks, AWS OpsWorks removes them. For + example, `my.name` will be changed to `myname`. If you do not + specify an SSH user name, AWS OpsWorks generates one from the IAM + user name. + + :type ssh_public_key: string + :param ssh_public_key: The user's new SSH public key. + + :type allow_self_management: boolean + :param allow_self_management: Whether users can specify their own SSH + public key through the My Settings page. For more information, see + `Managing User Permissions`_. + + """ + params = {'IamUserArn': iam_user_arn, } + if ssh_username is not None: + params['SshUsername'] = ssh_username + if ssh_public_key is not None: + params['SshPublicKey'] = ssh_public_key + if allow_self_management is not None: + params['AllowSelfManagement'] = allow_self_management + return self.make_request(action='UpdateUserProfile', + body=json.dumps(params)) + + def update_volume(self, volume_id, name=None, mount_point=None): + """ + Updates an Amazon EBS volume's name or mount point. For more + information, see `Resource Management`_. + + **Required Permissions**: To use this action, an IAM user must + have a Manage permissions level for the stack, or an attached + policy that explicitly grants permissions. For more + information on user permissions, see `Managing User + Permissions`_. + + :type volume_id: string + :param volume_id: The volume ID. 
+ + :type name: string + :param name: The new name. + + :type mount_point: string + :param mount_point: The new mount point. + + """ + params = {'VolumeId': volume_id, } + if name is not None: + params['Name'] = name + if mount_point is not None: + params['MountPoint'] = mount_point + return self.make_request(action='UpdateVolume', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/plugin.py b/ext/boto/plugin.py new file mode 100644 index 0000000000..2c2931c9df --- /dev/null +++ b/ext/boto/plugin.py @@ -0,0 +1,93 @@ +# Copyright 2010 Google Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +""" +Implements plugin related api. + +To define a new plugin just subclass Plugin, like this. + +class AuthPlugin(Plugin): + pass + +Then start creating subclasses of your new plugin. + +class MyFancyAuth(AuthPlugin): + capability = ['sign', 'vmac'] + +The actual interface is duck typed. 
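+
+Plugins can then be looked up by capability. A minimal sketch, assuming the
+AuthPlugin and MyFancyAuth classes defined above:
+
+    from boto.plugin import get_plugin
+
+    handlers = get_plugin(AuthPlugin, ['sign'])  # -> [MyFancyAuth]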
+""" + +import glob +import imp +import os.path + + +class Plugin(object): + """Base class for all plugins.""" + + capability = [] + + @classmethod + def is_capable(cls, requested_capability): + """Returns true if the requested capability is supported by this plugin + """ + for c in requested_capability: + if c not in cls.capability: + return False + return True + + +def get_plugin(cls, requested_capability=None): + if not requested_capability: + requested_capability = [] + result = [] + for handler in cls.__subclasses__(): + if handler.is_capable(requested_capability): + result.append(handler) + return result + + +def _import_module(filename): + (path, name) = os.path.split(filename) + (name, ext) = os.path.splitext(name) + + (file, filename, data) = imp.find_module(name, [path]) + try: + return imp.load_module(name, file, filename, data) + finally: + if file: + file.close() + +_plugin_loaded = False + + +def load_plugins(config): + global _plugin_loaded + if _plugin_loaded: + return + _plugin_loaded = True + + if not config.has_option('Plugin', 'plugin_directory'): + return + directory = config.get('Plugin', 'plugin_directory') + for file in glob.glob(os.path.join(directory, '*.py')): + _import_module(file) diff --git a/ext/boto/provider.py b/ext/boto/provider.py new file mode 100644 index 0000000000..e08afa3000 --- /dev/null +++ b/ext/boto/provider.py @@ -0,0 +1,484 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright 2010 Google Inc. +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2011, Nexenta Systems Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +""" +This class encapsulates the provider-specific header differences. 
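+
+A minimal illustration (assumes credentials are resolvable through the
+usual boto lookup chain, e.g. environment variables, so that no call to
+the instance metadata service is needed):
+
+    from boto.provider import Provider
+
+    p = Provider('aws')
+    print(p.acl_header)  # 'x-amz-acl'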
+""" + +import os +from boto.compat import six +from datetime import datetime + +import boto +from boto import config +from boto.compat import expanduser +from boto.pyami.config import Config +from boto.exception import InvalidInstanceMetadataError +from boto.gs.acl import ACL +from boto.gs.acl import CannedACLStrings as CannedGSACLStrings +from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings +from boto.s3.acl import Policy + + +HEADER_PREFIX_KEY = 'header_prefix' +METADATA_PREFIX_KEY = 'metadata_prefix' + +AWS_HEADER_PREFIX = 'x-amz-' +GOOG_HEADER_PREFIX = 'x-goog-' + +ACL_HEADER_KEY = 'acl-header' +AUTH_HEADER_KEY = 'auth-header' +COPY_SOURCE_HEADER_KEY = 'copy-source-header' +COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header' +COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header' +DELETE_MARKER_HEADER_KEY = 'delete-marker-header' +DATE_HEADER_KEY = 'date-header' +METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header' +RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header' +SECURITY_TOKEN_HEADER_KEY = 'security-token-header' +STORAGE_CLASS_HEADER_KEY = 'storage-class' +MFA_HEADER_KEY = 'mfa-header' +SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header' +VERSION_ID_HEADER_KEY = 'version-id-header' +RESTORE_HEADER_KEY = 'restore-header' + +STORAGE_COPY_ERROR = 'StorageCopyError' +STORAGE_CREATE_ERROR = 'StorageCreateError' +STORAGE_DATA_ERROR = 'StorageDataError' +STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError' +STORAGE_RESPONSE_ERROR = 'StorageResponseError' +NO_CREDENTIALS_PROVIDED = object() + + +class ProfileNotFoundError(ValueError): + pass + + +class Provider(object): + + CredentialMap = { + 'aws': ('aws_access_key_id', 'aws_secret_access_key', + 'aws_security_token', 'aws_profile'), + 'google': ('gs_access_key_id', 'gs_secret_access_key', + None, None), + } + + AclClassMap = { + 'aws': Policy, + 'google': ACL + } + + CannedAclsMap = { + 'aws': CannedS3ACLStrings, + 'google': CannedGSACLStrings + } + + HostKeyMap = { + 'aws': 's3', + 'google': 'gs' + } + + ChunkedTransferSupport = { + 'aws': False, + 'google': True + } + + MetadataServiceSupport = { + 'aws': True, + 'google': False + } + + # If you update this map please make sure to put "None" for the + # right-hand-side for any headers that don't apply to a provider, rather + # than simply leaving that header out (which would cause KeyErrors). 
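+    # For example, configure_headers() below indexes all of these keys
+    # unconditionally, so the 'google' map sets MFA_HEADER_KEY and
+    # RESTORE_HEADER_KEY to None rather than omitting them.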
+ HeaderInfoMap = { + 'aws': { + HEADER_PREFIX_KEY: AWS_HEADER_PREFIX, + METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'AWS', + COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX + + 'copy-source-range', + DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: None, + SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX + + 'server-side-encryption', + VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class', + MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa', + RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore', + }, + 'google': { + HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX, + METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-', + ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl', + AUTH_HEADER_KEY: 'GOOG1', + COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source', + COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + + 'copy-source-version-id', + COPY_SOURCE_RANGE_HEADER_KEY: None, + DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date', + DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker', + METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX + + 'metadata-directive', + RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable', + SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token', + SERVER_SIDE_ENCRYPTION_KEY: None, + # Note that this version header is not to be confused with + # the Google Cloud Storage 'x-goog-api-version' header. + VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id', + STORAGE_CLASS_HEADER_KEY: GOOG_HEADER_PREFIX + 'storage-class', + MFA_HEADER_KEY: None, + RESTORE_HEADER_KEY: None, + } + } + + ErrorMap = { + 'aws': { + STORAGE_COPY_ERROR: boto.exception.S3CopyError, + STORAGE_CREATE_ERROR: boto.exception.S3CreateError, + STORAGE_DATA_ERROR: boto.exception.S3DataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError, + }, + 'google': { + STORAGE_COPY_ERROR: boto.exception.GSCopyError, + STORAGE_CREATE_ERROR: boto.exception.GSCreateError, + STORAGE_DATA_ERROR: boto.exception.GSDataError, + STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError, + STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError, + } + } + + def __init__(self, name, access_key=None, secret_key=None, + security_token=None, profile_name=None): + self.host = None + self.port = None + self.host_header = None + self.access_key = access_key + self.secret_key = secret_key + self.security_token = security_token + self.profile_name = profile_name + self.name = name + self.acl_class = self.AclClassMap[self.name] + self.canned_acls = self.CannedAclsMap[self.name] + self._credential_expiry_time = None + + # Load shared credentials file if it exists + shared_path = os.path.join(expanduser('~'), '.' 
+ name, 'credentials') + self.shared_credentials = Config(do_load=False) + if os.path.isfile(shared_path): + self.shared_credentials.load_from_path(shared_path) + + self.get_credentials(access_key, secret_key, security_token, profile_name) + self.configure_headers() + self.configure_errors() + + # Allow config file to override default host and port. + host_opt_name = '%s_host' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_opt_name): + self.host = config.get('Credentials', host_opt_name) + port_opt_name = '%s_port' % self.HostKeyMap[self.name] + if config.has_option('Credentials', port_opt_name): + self.port = config.getint('Credentials', port_opt_name) + host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name] + if config.has_option('Credentials', host_header_opt_name): + self.host_header = config.get('Credentials', host_header_opt_name) + + def get_access_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._access_key + + def set_access_key(self, value): + self._access_key = value + + access_key = property(get_access_key, set_access_key) + + def get_secret_key(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._secret_key + + def set_secret_key(self, value): + self._secret_key = value + + secret_key = property(get_secret_key, set_secret_key) + + def get_security_token(self): + if self._credentials_need_refresh(): + self._populate_keys_from_metadata_server() + return self._security_token + + def set_security_token(self, value): + self._security_token = value + + security_token = property(get_security_token, set_security_token) + + def _credentials_need_refresh(self): + if self._credential_expiry_time is None: + return False + else: + # The credentials should be refreshed if they're going to expire + # in less than 5 minutes. + delta = self._credential_expiry_time - datetime.utcnow() + # python2.6 does not have timedelta.total_seconds() so we have + # to calculate this ourselves. This is straight from the + # datetime docs. + seconds_left = ( + (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) + * 10 ** 6) / 10 ** 6) + if seconds_left < (5 * 60): + boto.log.debug("Credentials need to be refreshed.") + return True + else: + return False + + def get_credentials(self, access_key=None, secret_key=None, + security_token=None, profile_name=None): + access_key_name, secret_key_name, security_token_name, \ + profile_name_name = self.CredentialMap[self.name] + + # Load profile from shared environment variable if it was not + # already passed in and the environment variable exists + if profile_name is None and profile_name_name is not None and \ + profile_name_name.upper() in os.environ: + profile_name = os.environ[profile_name_name.upper()] + + shared = self.shared_credentials + + if access_key is not None: + self.access_key = access_key + boto.log.debug("Using access key provided by client.") + elif access_key_name.upper() in os.environ: + self.access_key = os.environ[access_key_name.upper()] + boto.log.debug("Using access key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, access_key_name): + self.access_key = shared.get(profile_name, access_key_name) + boto.log.debug("Using access key found in shared credential " + "file for profile %s." 
% profile_name) + elif config.has_option("profile %s" % profile_name, + access_key_name): + self.access_key = config.get("profile %s" % profile_name, + access_key_name) + boto.log.debug("Using access key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' % + profile_name) + elif shared.has_option('default', access_key_name): + self.access_key = shared.get('default', access_key_name) + boto.log.debug("Using access key found in shared credential file.") + elif config.has_option('Credentials', access_key_name): + self.access_key = config.get('Credentials', access_key_name) + boto.log.debug("Using access key found in config file.") + + if secret_key is not None: + self.secret_key = secret_key + boto.log.debug("Using secret key provided by client.") + elif secret_key_name.upper() in os.environ: + self.secret_key = os.environ[secret_key_name.upper()] + boto.log.debug("Using secret key found in environment variable.") + elif profile_name is not None: + if shared.has_option(profile_name, secret_key_name): + self.secret_key = shared.get(profile_name, secret_key_name) + boto.log.debug("Using secret key found in shared credential " + "file for profile %s." % profile_name) + elif config.has_option("profile %s" % profile_name, secret_key_name): + self.secret_key = config.get("profile %s" % profile_name, + secret_key_name) + boto.log.debug("Using secret key found in config file: " + "profile %s." % profile_name) + else: + raise ProfileNotFoundError('Profile "%s" not found!' % + profile_name) + elif shared.has_option('default', secret_key_name): + self.secret_key = shared.get('default', secret_key_name) + boto.log.debug("Using secret key found in shared credential file.") + elif config.has_option('Credentials', secret_key_name): + self.secret_key = config.get('Credentials', secret_key_name) + boto.log.debug("Using secret key found in config file.") + elif config.has_option('Credentials', 'keyring'): + keyring_name = config.get('Credentials', 'keyring') + try: + import keyring + except ImportError: + boto.log.error("The keyring module could not be imported. " + "For keyring support, install the keyring " + "module.") + raise + self.secret_key = keyring.get_password( + keyring_name, self.access_key) + boto.log.debug("Using secret key found in keyring.") + + if security_token is not None: + self.security_token = security_token + boto.log.debug("Using security token provided by client.") + elif ((security_token_name is not None) and + (access_key is None) and (secret_key is None)): + # Only provide a token from the environment/config if the + # caller did not specify a key and secret. Otherwise an + # environment/config token could be paired with a + # different set of credentials provided by the caller + if security_token_name.upper() in os.environ: + self.security_token = os.environ[security_token_name.upper()] + boto.log.debug("Using security token found in environment" + " variable.") + elif shared.has_option(profile_name or 'default', + security_token_name): + self.security_token = shared.get(profile_name or 'default', + security_token_name) + boto.log.debug("Using security token found in shared " + "credential file.") + elif profile_name is not None: + if config.has_option("profile %s" % profile_name, + security_token_name): + boto.log.debug("config has option") + self.security_token = config.get("profile %s" % profile_name, + security_token_name) + boto.log.debug("Using security token found in config file: " + "profile %s." 
% profile_name)
+            elif config.has_option('Credentials', security_token_name):
+                self.security_token = config.get('Credentials',
+                                                 security_token_name)
+                boto.log.debug("Using security token found in config file.")
+
+        if ((self._access_key is None or self._secret_key is None) and
+                self.MetadataServiceSupport[self.name]):
+            self._populate_keys_from_metadata_server()
+        self._secret_key = self._convert_key_to_str(self._secret_key)
+
+    def _populate_keys_from_metadata_server(self):
+        # get_instance_metadata is imported here because of a circular
+        # dependency.
+        boto.log.debug("Retrieving credentials from metadata server.")
+        from boto.utils import get_instance_metadata
+        timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
+        attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
+        # The num_retries arg is actually the total number of attempts made,
+        # so the config option is named *_num_attempts to make this more
+        # clear to users.
+        metadata = get_instance_metadata(
+            timeout=timeout, num_retries=attempts,
+            data='meta-data/iam/security-credentials/')
+        if metadata:
+            creds = self._get_credentials_from_metadata(metadata)
+            self._access_key = creds[0]
+            self._secret_key = creds[1]
+            self._security_token = creds[2]
+            expires_at = creds[3]
+            # I'm assuming there's only one role on the instance profile.
+            self._credential_expiry_time = datetime.strptime(
+                expires_at, "%Y-%m-%dT%H:%M:%SZ")
+            boto.log.debug("Retrieved credentials will expire in %s at: %s",
+                           self._credential_expiry_time - datetime.now(),
+                           expires_at)
+
+    def _get_credentials_from_metadata(self, metadata):
+        # Given metadata, return a tuple of (access, secret, token, expiration)
+        # On errors, an InvalidInstanceMetadataError will be raised.
+        # The "metadata" is a lazily loaded dictionary, which means that it's
+        # possible to still encounter errors as we traverse through the
+        # metadata dict. We try to be careful and raise helpful error
+        # messages when this happens.
+        creds = list(metadata.values())[0]
+        if not isinstance(creds, dict):
+            # We want to special case a specific error condition which is
+            # where get_instance_metadata() returns an empty string on
+            # error conditions.
+ if creds == '': + msg = 'an empty string' + else: + msg = 'type: %s' % creds + raise InvalidInstanceMetadataError("Expected a dict type of " + "credentials instead received " + "%s" % (msg)) + try: + access_key = creds['AccessKeyId'] + secret_key = self._convert_key_to_str(creds['SecretAccessKey']) + security_token = creds['Token'] + expires_at = creds['Expiration'] + except KeyError as e: + raise InvalidInstanceMetadataError( + "Credentials from instance metadata missing " + "required key: %s" % e) + return access_key, secret_key, security_token, expires_at + + def _convert_key_to_str(self, key): + if isinstance(key, six.text_type): + # the secret key must be bytes and not unicode to work + # properly with hmac.new (see http://bugs.python.org/issue5285) + return str(key) + return key + + def configure_headers(self): + header_info_map = self.HeaderInfoMap[self.name] + self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY] + self.header_prefix = header_info_map[HEADER_PREFIX_KEY] + self.acl_header = header_info_map[ACL_HEADER_KEY] + self.auth_header = header_info_map[AUTH_HEADER_KEY] + self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY] + self.copy_source_version_id = header_info_map[ + COPY_SOURCE_VERSION_ID_HEADER_KEY] + self.copy_source_range_header = header_info_map[ + COPY_SOURCE_RANGE_HEADER_KEY] + self.date_header = header_info_map[DATE_HEADER_KEY] + self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY] + self.metadata_directive_header = ( + header_info_map[METADATA_DIRECTIVE_HEADER_KEY]) + self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY] + self.resumable_upload_header = ( + header_info_map[RESUMABLE_UPLOAD_HEADER_KEY]) + self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY] + self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY] + self.version_id = header_info_map[VERSION_ID_HEADER_KEY] + self.mfa_header = header_info_map[MFA_HEADER_KEY] + self.restore_header = header_info_map[RESTORE_HEADER_KEY] + + def configure_errors(self): + error_map = self.ErrorMap[self.name] + self.storage_copy_error = error_map[STORAGE_COPY_ERROR] + self.storage_create_error = error_map[STORAGE_CREATE_ERROR] + self.storage_data_error = error_map[STORAGE_DATA_ERROR] + self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR] + self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR] + + def get_provider_name(self): + return self.HostKeyMap[self.name] + + def supports_chunked_transfer(self): + return self.ChunkedTransferSupport[self.name] + + +# Static utility method for getting default Provider. +def get_default(): + return Provider('aws') diff --git a/ext/boto/pyami/__init__.py b/ext/boto/pyami/__init__.py new file mode 100644 index 0000000000..303dbb66c9 --- /dev/null +++ b/ext/boto/pyami/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/ext/boto/pyami/bootstrap.py b/ext/boto/pyami/bootstrap.py new file mode 100644 index 0000000000..82c2822edd --- /dev/null +++ b/ext/boto/pyami/bootstrap.py @@ -0,0 +1,134 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import os +import boto +from boto.utils import get_instance_metadata, get_instance_userdata +from boto.pyami.config import Config, BotoConfigPath +from boto.pyami.scriptbase import ScriptBase +import time + +class Bootstrap(ScriptBase): + """ + The Bootstrap class is instantiated and run as part of the PyAMI + instance initialization process. The methods in this class will + be run from the rc.local script of the instance and will be run + as the root user. + + The main purpose of this class is to make sure the boto distribution + on the instance is the one required. + """ + + def __init__(self): + self.working_dir = '/mnt/pyami' + self.write_metadata() + super(Bootstrap, self).__init__() + + def write_metadata(self): + fp = open(os.path.expanduser(BotoConfigPath), 'w') + fp.write('[Instance]\n') + inst_data = get_instance_metadata() + for key in inst_data: + fp.write('%s = %s\n' % (key, inst_data[key])) + user_data = get_instance_userdata() + fp.write('\n%s\n' % user_data) + fp.write('[Pyami]\n') + fp.write('working_dir = %s\n' % self.working_dir) + fp.close() + # This file has the AWS credentials, should we lock it down? 
+ # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE) + # now that we have written the file, read it into a pyami Config object + boto.config = Config() + boto.init_logging() + + def create_working_dir(self): + boto.log.info('Working directory: %s' % self.working_dir) + if not os.path.exists(self.working_dir): + os.mkdir(self.working_dir) + + def load_boto(self): + update = boto.config.get('Boto', 'boto_update', 'svn:HEAD') + if update.startswith('svn'): + if update.find(':') >= 0: + method, version = update.split(':') + version = '-r%s' % version + else: + version = '-rHEAD' + location = boto.config.get('Boto', 'boto_location', '/usr/local/boto') + self.run('svn update %s %s' % (version, location)) + elif update.startswith('git'): + location = boto.config.get('Boto', 'boto_location', '/usr/share/python-support/python-boto/boto') + num_remaining_attempts = 10 + while num_remaining_attempts > 0: + num_remaining_attempts -= 1 + try: + self.run('git pull', cwd=location) + num_remaining_attempts = 0 + except Exception as e: + boto.log.info('git pull attempt failed with the following exception. Trying again in a bit. %s', e) + time.sleep(2) + if update.find(':') >= 0: + method, version = update.split(':') + else: + version = 'master' + self.run('git checkout %s' % version, cwd=location) + else: + # first remove the symlink needed when running from subversion + self.run('rm /usr/local/lib/python2.5/site-packages/boto') + self.run('easy_install %s' % update) + + def fetch_s3_file(self, s3_file): + try: + from boto.utils import fetch_file + f = fetch_file(s3_file) + path = os.path.join(self.working_dir, s3_file.split("/")[-1]) + open(path, "w").write(f.read()) + except: + boto.log.exception('Problem Retrieving file: %s' % s3_file) + path = None + return path + + def load_packages(self): + package_str = boto.config.get('Pyami', 'packages') + if package_str: + packages = package_str.split(',') + for package in packages: + package = package.strip() + if package.startswith('s3:'): + package = self.fetch_s3_file(package) + if package: + # if the "package" is really a .py file, it doesn't have to + # be installed, just being in the working dir is enough + if not package.endswith('.py'): + self.run('easy_install -Z %s' % package, exit_on_error=False) + + def main(self): + self.create_working_dir() + self.load_boto() + self.load_packages() + self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id')) + +if __name__ == "__main__": + # because bootstrap starts before any logging configuration can be loaded from + # the boto config files, we will manually enable logging to /var/log/boto.log + boto.set_file_logger('bootstrap', '/var/log/boto.log') + bs = Bootstrap() + bs.main() diff --git a/ext/boto/pyami/config.py b/ext/boto/pyami/config.py new file mode 100644 index 0000000000..e0b6e2f362 --- /dev/null +++ b/ext/boto/pyami/config.py @@ -0,0 +1,235 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice 
shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+import re
+import warnings
+
+import boto
+
+from boto.compat import expanduser, ConfigParser, NoOptionError, NoSectionError, StringIO
+
+
+# By default we use two locations for the boto configurations,
+# /etc/boto.cfg and ~/.boto (which works on Windows and Unix).
+BotoConfigPath = '/etc/boto.cfg'
+BotoConfigLocations = [BotoConfigPath]
+UserConfigPath = os.path.join(expanduser('~'), '.boto')
+BotoConfigLocations.append(UserConfigPath)
+
+# If there's a BOTO_CONFIG variable set, we load ONLY
+# that variable
+if 'BOTO_CONFIG' in os.environ:
+    BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
+
+# If there's a BOTO_PATH variable set, we use anything there
+# as the current configuration locations, split with os.pathsep.
+elif 'BOTO_PATH' in os.environ:
+    BotoConfigLocations = []
+    for path in os.environ['BOTO_PATH'].split(os.pathsep):
+        BotoConfigLocations.append(expanduser(path))
+
+
+class Config(object):
+
+    def __init__(self, path=None, fp=None, do_load=True):
+        self._parser = ConfigParser({'working_dir': '/mnt/pyami',
+                                     'debug': '0'})
+        if do_load:
+            if path:
+                self.load_from_path(path)
+            elif fp:
+                self.readfp(fp)
+            else:
+                self.read(BotoConfigLocations)
+            if "AWS_CREDENTIAL_FILE" in os.environ:
+                full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
+                try:
+                    self.load_credential_file(full_path)
+                except IOError:
+                    warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
+
+    def __setstate__(self, state):
+        # There are tests that verify that (transitively) a Config
+        # object can be pickled. Now that we're storing a _parser
+        # attribute and relying on __getattr__ to proxy requests,
+        # we need to implement __setstate__ to ensure we don't get
+        # into recursive loops when looking up _parser when
+        # this object is unpickled.
+        self._parser = state['_parser']
+
+    def __getattr__(self, name):
+        return getattr(self._parser, name)
+
+    def has_option(self, *args, **kwargs):
+        return self._parser.has_option(*args, **kwargs)
+
+    def load_credential_file(self, path):
+        """Load a credential file set up like the Java utilities."""
+        c_data = StringIO()
+        c_data.write("[Credentials]\n")
+        for line in open(path, "r").readlines():
+            c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
+        c_data.seek(0)
+        self.readfp(c_data)
+
+    def load_from_path(self, path):
+        file = open(path)
+        for line in file.readlines():
+            match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
+            if match:
+                extended_file = match.group(1)
+                (dir, file) = os.path.split(path)
+                self.load_from_path(os.path.join(dir, extended_file))
+        self.read(path)
+
+    def save_option(self, path, section, option, value):
+        """
+        Write the specified Section.Option to the config file specified by path.
+        Replace any previous value. If the path doesn't exist, create it.
+        Also add the option to the in-memory config.
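+
+        A minimal example (illustrative)::
+
+            config.save_option('/etc/boto.cfg', 'Boto', 'debug', '1')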
+ """ + config = ConfigParser() + config.read(path) + if not config.has_section(section): + config.add_section(section) + config.set(section, option, value) + fp = open(path, 'w') + config.write(fp) + fp.close() + if not self.has_section(section): + self.add_section(section) + self.set(section, option, value) + + def save_user_option(self, section, option, value): + self.save_option(UserConfigPath, section, option, value) + + def save_system_option(self, section, option, value): + self.save_option(BotoConfigPath, section, option, value) + + def get_instance(self, name, default=None): + try: + val = self.get('Instance', name) + except (NoOptionError, NoSectionError): + val = default + return val + + def get_user(self, name, default=None): + try: + val = self.get('User', name) + except (NoOptionError, NoSectionError): + val = default + return val + + def getint_user(self, name, default=0): + try: + val = self.getint('User', name) + except (NoOptionError, NoSectionError): + val = default + return val + + def get_value(self, section, name, default=None): + return self.get(section, name, default) + + def get(self, section, name, default=None): + try: + return self._parser.get(section, name) + except (NoOptionError, NoSectionError): + return default + + def getint(self, section, name, default=0): + try: + return self._parser.getint(section, name) + except (NoOptionError, NoSectionError): + return int(default) + + def getfloat(self, section, name, default=0.0): + try: + return self._parser.getfloat(section, name) + except (NoOptionError, NoSectionError): + return float(default) + + def getbool(self, section, name, default=False): + if self.has_option(section, name): + val = self.get(section, name) + if val.lower() == 'true': + val = True + else: + val = False + else: + val = default + return val + + def setbool(self, section, name, value): + if value: + self.set(section, name, 'true') + else: + self.set(section, name, 'false') + + def dump(self): + s = StringIO() + self.write(s) + print(s.getvalue()) + + def dump_safe(self, fp=None): + if not fp: + fp = StringIO() + for section in self.sections(): + fp.write('[%s]\n' % section) + for option in self.options(section): + if option == 'aws_secret_access_key': + fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) + else: + fp.write('%s = %s\n' % (option, self.get(section, option))) + + def dump_to_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + if not domain: + domain = sdb.create_domain(domain_name) + item = domain.new_item(item_name) + item.active = False + for section in self.sections(): + d = {} + for option in self.options(section): + d[option] = self.get(section, option) + item[section] = json.dumps(d) + item.save() + + def load_from_sdb(self, domain_name, item_name): + from boto.compat import json + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + item = domain.get_item(item_name) + for section in item.keys(): + if not self.has_section(section): + self.add_section(section) + d = json.loads(item[section]) + for attr_name in d.keys(): + attr_value = d[attr_name] + if attr_value is None: + attr_value = 'None' + if isinstance(attr_value, bool): + self.setbool(section, attr_name, attr_value) + else: + self.set(section, attr_name, attr_value) diff --git a/ext/boto/pyami/copybot.cfg b/ext/boto/pyami/copybot.cfg new file mode 100644 index 0000000000..cbfdc5ad19 --- /dev/null +++ b/ext/boto/pyami/copybot.cfg @@ -0,0 +1,60 @@ +# +# Your AWS Credentials +# 
+[Credentials]
+aws_access_key_id =
+aws_secret_access_key =
+
+#
+# If you want to use a separate set of credentials when writing
+# to the destination bucket, put them here
+#dest_aws_access_key_id =
+#dest_aws_secret_access_key =
+
+#
+# Fill out this section if you want emails from CopyBot
+# when it starts and stops
+#
+[Notification]
+#smtp_host =
+#smtp_user =
+#smtp_pass =
+#smtp_from =
+#smtp_to =
+
+#
+# If you leave this section as is, it will automatically
+# update boto from subversion upon start up.
+# If you don't want that to happen, comment this out
+#
+[Boto]
+boto_location = /usr/local/boto
+boto_update = svn:HEAD
+
+#
+# This tells the Pyami code in boto what scripts
+# to run during startup
+#
+[Pyami]
+scripts = boto.pyami.copybot.CopyBot
+
+#
+# Source bucket and Destination Bucket, obviously.
+# If the Destination bucket does not exist, it will
+# attempt to create it.
+# If exit_on_completion is false, the instance
+# will keep running after the copy operation is
+# complete which might be handy for debugging.
+# If copy_acls is false, the ACLs will not be
+# copied with the objects to the new bucket.
+# If replace_dst is false, copybot will only
+# store the source file in the dest if that
+# file does not already exist. If it's true
+# it will replace it even if it does exist.
+#
+[CopyBot]
+src_bucket =
+dst_bucket =
+exit_on_completion = true
+copy_acls = true
+replace_dst = true
diff --git a/ext/boto/pyami/copybot.py b/ext/boto/pyami/copybot.py
new file mode 100644
index 0000000000..09a6d444c5
--- /dev/null
+++ b/ext/boto/pyami/copybot.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
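+#
+# CopyBot copies every key in a source S3 bucket to a destination
+# bucket, optionally copying ACLs as well, driven by the [CopyBot]
+# section of the boto config (see copybot.cfg above).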
+#
+import boto
+from boto.pyami.scriptbase import ScriptBase
+import os, StringIO
+
+class CopyBot(ScriptBase):
+
+    def __init__(self):
+        super(CopyBot, self).__init__()
+        self.wdir = boto.config.get('Pyami', 'working_dir')
+        self.log_file = '%s.log' % self.instance_id
+        self.log_path = os.path.join(self.wdir, self.log_file)
+        boto.set_file_logger(self.name, self.log_path)
+        self.src_name = boto.config.get(self.name, 'src_bucket')
+        self.dst_name = boto.config.get(self.name, 'dst_bucket')
+        self.replace = boto.config.getbool(self.name, 'replace_dst', True)
+        s3 = boto.connect_s3()
+        self.src = s3.lookup(self.src_name)
+        if not self.src:
+            boto.log.error('Source bucket does not exist: %s' % self.src_name)
+        dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
+        if dest_access_key:
+            dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
+            s3 = boto.connect_s3(dest_access_key, dest_secret_key)
+        self.dst = s3.lookup(self.dst_name)
+        if not self.dst:
+            self.dst = s3.create_bucket(self.dst_name)
+
+    def copy_bucket_acl(self):
+        if boto.config.get(self.name, 'copy_acls', True):
+            acl = self.src.get_xml_acl()
+            self.dst.set_xml_acl(acl)
+
+    def copy_key_acl(self, src, dst):
+        if boto.config.get(self.name, 'copy_acls', True):
+            acl = src.get_xml_acl()
+            dst.set_xml_acl(acl)
+
+    def copy_keys(self):
+        boto.log.info('src=%s' % self.src.name)
+        boto.log.info('dst=%s' % self.dst.name)
+        try:
+            for key in self.src:
+                if not self.replace:
+                    exists = self.dst.lookup(key.name)
+                    if exists:
+                        boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
+                        continue
+                boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
+                prefix, base = os.path.split(key.name)
+                path = os.path.join(self.wdir, base)
+                key.get_contents_to_filename(path)
+                new_key = self.dst.new_key(key.name)
+                new_key.set_contents_from_filename(path)
+                self.copy_key_acl(key, new_key)
+                os.unlink(path)
+        except:
+            boto.log.exception('Error copying key: %s' % key.name)
+
+    def copy_log(self):
+        key = self.dst.new_key(self.log_file)
+        key.set_contents_from_filename(self.log_path)
+
+    def main(self):
+        fp = StringIO.StringIO()
+        boto.config.dump_safe(fp)
+        self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
+        if self.src and self.dst:
+            self.copy_keys()
+        if self.dst:
+            self.copy_log()
+        self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
+                    'Copy Operation Complete')
+        if boto.config.getbool(self.name, 'exit_on_completion', True):
+            ec2 = boto.connect_ec2()
+            ec2.terminate_instances([self.instance_id])
diff --git a/ext/boto/pyami/helloworld.py b/ext/boto/pyami/helloworld.py
new file mode 100644
index 0000000000..b9b53b60c5
--- /dev/null
+++ b/ext/boto/pyami/helloworld.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
+class HelloWorld(ScriptBase):
+
+    def main(self):
+        self.log('Hello World!!!')
diff --git a/ext/boto/pyami/installers/__init__.py b/ext/boto/pyami/installers/__init__.py
new file mode 100644
index 0000000000..44abd0d24a
--- /dev/null
+++ b/ext/boto/pyami/installers/__init__.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
+
+class Installer(ScriptBase):
+    """
+    Abstract base class for installers
+    """
+
+    def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None):
+        """
+        Add an entry to the system crontab.
+        """
+        raise NotImplementedError
+
+    def add_init_script(self, file):
+        """
+        Add this file to the init.d directory
+        """
+
+    def add_env(self, key, value):
+        """
+        Add an environment variable
+        """
+        raise NotImplementedError
+
+    def stop(self, service_name):
+        """
+        Stop a service.
+        """
+        raise NotImplementedError
+
+    def start(self, service_name):
+        """
+        Start a service.
+        """
+        raise NotImplementedError
+
+    def install(self):
+        """
+        Do whatever is necessary to "install" the package.
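+
+        A subclass sketch (illustrative; ``self.run`` is provided by
+        ScriptBase, and ``mypackage`` is a placeholder)::
+
+            class MyInstaller(Installer):
+                def install(self):
+                    self.run('apt-get -y install mypackage')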
+ """ + raise NotImplementedError diff --git a/ext/boto/pyami/installers/ubuntu/__init__.py b/ext/boto/pyami/installers/ubuntu/__init__.py new file mode 100644 index 0000000000..60ee658e34 --- /dev/null +++ b/ext/boto/pyami/installers/ubuntu/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/ext/boto/pyami/installers/ubuntu/apache.py b/ext/boto/pyami/installers/ubuntu/apache.py new file mode 100644 index 0000000000..febc2dfa25 --- /dev/null +++ b/ext/boto/pyami/installers/ubuntu/apache.py @@ -0,0 +1,43 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+
+class Apache(Installer):
+    """
+    Install apache2 and mod_python
+    """
+
+    def install(self):
+        self.run("apt-get update")
+        self.run('apt-get -y install apache2', notify=True, exit_on_error=True)
+        self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True)
+        self.run('a2enmod rewrite', notify=True, exit_on_error=True)
+        self.run('a2enmod ssl', notify=True, exit_on_error=True)
+        self.run('a2enmod proxy', notify=True, exit_on_error=True)
+        self.run('a2enmod proxy_ajp', notify=True, exit_on_error=True)
+
+        # Restart the apache2 server to enable these modules
+        self.stop("apache2")
+        self.start("apache2")
+
+    def main(self):
+        self.install()
diff --git a/ext/boto/pyami/installers/ubuntu/ebs.py b/ext/boto/pyami/installers/ubuntu/ebs.py
new file mode 100644
index 0000000000..a0a507045e
--- /dev/null
+++ b/ext/boto/pyami/installers/ubuntu/ebs.py
@@ -0,0 +1,235 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+Automated installer to attach, format and mount an EBS volume.
+This installer assumes that you want the volume formatted as
+an XFS file system. To drive this installer, you need the
+following section in the boto config passed to the new instance.
+You also need to install dateutil by listing python-dateutil
+in the list of packages to be installed in the Pyami section
+of your boto config file.
+
+If there is already a device mounted at the specified mount point,
+the installer assumes that it is the ephemeral drive and unmounts
+it, remounts it as /tmp and chmods it to 777.
+
+Config file section::
+
+    [EBS]
+    volume_id =
+    logical_volume_name =
+    device =
+    mount_point =
+
+"""
+import boto
+from boto.manage.volume import Volume
+from boto.exception import EC2ResponseError
+import os, time
+from boto.pyami.installers.ubuntu.installer import Installer
+from string import Template
+
+BackupScriptTemplate = """#!/usr/bin/env python
+# Backup EBS volume
+import boto
+from boto.pyami.scriptbase import ScriptBase
+import traceback
+
+class Backup(ScriptBase):
+
+    def main(self):
+        try:
+            ec2 = boto.connect_ec2()
+            self.run("/usr/sbin/xfs_freeze -f ${mount_point}", exit_on_error = True)
+            snapshot = ec2.create_snapshot('${volume_id}')
+            boto.log.info("Snapshot created: %s " % snapshot)
+        except Exception as e:
+            self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc())
+        finally:
+            self.run("/usr/sbin/xfs_freeze -u ${mount_point}")
+
+if __name__ == "__main__":
+    b = Backup()
+    b.main()
+"""
+
+BackupCleanupScript= """#!/usr/bin/env python
+import boto
+from boto.manage.volume import Volume
+
+# Cleans Backups of EBS volumes
+
+for v in Volume.all():
+    v.trim_snapshots(True)
+"""
+
+TagBasedBackupCleanupScript= """#!/usr/bin/env python
+import boto
+
+# Cleans Backups of EBS volumes
+
+ec2 = boto.connect_ec2()
+ec2.trim_snapshots()
+"""
+
+class EBSInstaller(Installer):
+    """
+    Set up the EBS stuff
+    """
+
+    def __init__(self, config_file=None):
+        super(EBSInstaller, self).__init__(config_file)
+        self.instance_id = boto.config.get('Instance', 'instance-id')
+        self.device = boto.config.get('EBS', 'device', '/dev/sdp')
+        self.volume_id = boto.config.get('EBS', 'volume_id')
+        self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name')
+        self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs')
+
+    def attach(self):
+        ec2 = boto.connect_ec2()
+        if self.logical_volume_name:
+            # if a logical volume was specified, override the specified volume_id
+            # (if there was one) with the current AWS volume for the logical volume:
+            logical_volume = next(Volume.find(name=self.logical_volume_name))
+            self.volume_id = logical_volume._volume_id
+        volume = ec2.get_all_volumes([self.volume_id])[0]
+        # wait for the volume to be available. The volume may still be being created
+        # from a snapshot.
+        while volume.update() != 'available':
+            boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status))
+            time.sleep(5)
+        instance = ec2.get_only_instances([self.instance_id])[0]
+        attempt_attach = True
+        while attempt_attach:
+            try:
+                ec2.attach_volume(self.volume_id, self.instance_id, self.device)
+                attempt_attach = False
+            except EC2ResponseError as e:
+                if e.error_code == 'IncorrectState':
+                    # if there's an EC2ResponseError with the code set to IncorrectState, delay a bit for ec2
+                    # to realize the instance is running, then try again. Otherwise, raise the error:
+                    boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.'
+                                  % (self.volume_id, self.instance_id, e.errors))
+                    time.sleep(2)
+                else:
+                    raise e
+        boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device))
+        # now wait for the volume device to appear
+        while not os.path.exists(self.device):
+            boto.log.info('%s still does not exist, waiting 2 seconds' % self.device)
+            time.sleep(2)
+
+    def make_fs(self):
+        boto.log.info('make_fs...')
+        has_fs = self.run('fsck %s' % self.device)
+        if has_fs != 0:
+            self.run('mkfs -t xfs %s' % self.device)
+
+    def create_backup_script(self):
+        t = Template(BackupScriptTemplate)
+        s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id,
+                         mount_point=self.mount_point)
+        fp = open('/usr/local/bin/ebs_backup', 'w')
+        fp.write(s)
+        fp.close()
+        self.run('chmod +x /usr/local/bin/ebs_backup')
+
+    def create_backup_cleanup_script(self, use_tag_based_cleanup=False):
+        fp = open('/usr/local/bin/ebs_backup_cleanup', 'w')
+        if use_tag_based_cleanup:
+            fp.write(TagBasedBackupCleanupScript)
+        else:
+            fp.write(BackupCleanupScript)
+        fp.close()
+        self.run('chmod +x /usr/local/bin/ebs_backup_cleanup')
+
+    def handle_mount_point(self):
+        boto.log.info('handle_mount_point')
+        if not os.path.isdir(self.mount_point):
+            boto.log.info('making directory')
+            # mount directory doesn't exist so create it
+            self.run("mkdir %s" % self.mount_point)
+        else:
+            boto.log.info('directory exists already')
+            self.run('mount -l')
+            lines = self.last_command.output.split('\n')
+            for line in lines:
+                t = line.split()
+                if t and t[2] == self.mount_point:
+                    # something is already mounted at the mount point
+                    # unmount that and mount it as /tmp
+                    if t[0] != self.device:
+                        self.run('umount %s' % self.mount_point)
+                        self.run('mount %s /tmp' % t[0])
+                        break
+            self.run('chmod 777 /tmp')
+        # Mount up our new EBS volume onto mount_point
+        self.run("mount %s %s" % (self.device, self.mount_point))
+        self.run('xfs_growfs %s' % self.mount_point)
+
+    def update_fstab(self):
+        f = open("/etc/fstab", "a")
+        f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.device, self.mount_point))
+        f.close()
+
+    def install(self):
+        # First, find and attach the volume
+        self.attach()
+
+        # Install the xfs tools
+        self.run('apt-get -y install xfsprogs xfsdump')
+
+        # Check to see if the filesystem was created or not
+        self.make_fs()
+
+        # create the /ebs directory for mounting
+        self.handle_mount_point()
+
+        # create the backup script
+        self.create_backup_script()
+
+        # Set up the backup script
+        minute = boto.config.get('EBS', 'backup_cron_minute', '0')
+        hour = boto.config.get('EBS', 'backup_cron_hour', '4,16')
+        self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour)
+
+        # Set up the backup cleanup script
+        minute = boto.config.get('EBS', 'backup_cleanup_cron_minute')
+        hour = boto.config.get('EBS', 'backup_cleanup_cron_hour')
+        if (minute is not None) and (hour is not None):
+            # Snapshot clean up can either be done via the manage module, or via the new tag based
+            # snapshot code, if the snapshots have been tagged with the name of the associated
+            # volume. Check for the presence of the new configuration flag, and use the appropriate
+            # cleanup method / script:
+            use_tag_based_cleanup = boto.config.has_option('EBS', 'use_tag_based_snapshot_cleanup')
+            self.create_backup_cleanup_script(use_tag_based_cleanup)
+            self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour)
+
+        # Set up the fstab
+        self.update_fstab()
+
+    def main(self):
+        if not os.path.exists(self.device):
+            self.install()
+        else:
+            boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device)
diff --git a/ext/boto/pyami/installers/ubuntu/installer.py b/ext/boto/pyami/installers/ubuntu/installer.py
new file mode 100644
index 0000000000..5a2abd908b
--- /dev/null
+++ b/ext/boto/pyami/installers/ubuntu/installer.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto.pyami.installers
+import os
+import os.path
+import stat
+import boto
+import random
+from pwd import getpwnam
+
+class Installer(boto.pyami.installers.Installer):
+    """
+    Base Installer class for Ubuntu-based AMIs
+    """
+    def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
+        """
+        Write a file to /etc/cron.d to schedule a command
+            env is a dict containing environment variables you want to set in the file
+            name will be used as the name of the file
+        """
+        if minute == 'random':
+            minute = str(random.randrange(60))
+        if hour == 'random':
+            hour = str(random.randrange(24))
+        fp = open('/etc/cron.d/%s' % name, "w")
+        if env:
+            for key, value in env.items():
+                fp.write('%s=%s\n' % (key, value))
+        fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
+        fp.close()
+
+    def add_init_script(self, file, name):
+        """
+        Add this file to the init.d directory
+        """
+        f_path = os.path.join("/etc/init.d", name)
+        f = open(f_path, "w")
+        f.write(file)
+        f.close()
+        os.chmod(f_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
+        self.run("/usr/sbin/update-rc.d %s defaults" % name)
+
+    def add_env(self, key, value):
+        """
+        Add an environment variable
+        For Ubuntu, the best place is /etc/environment. Values placed here do
+        not need to be exported.
+ """ + boto.log.info('Adding env variable: %s=%s' % (key, value)) + if not os.path.exists("/etc/environment.orig"): + self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False) + fp = open('/etc/environment', 'a') + fp.write('\n%s="%s"' % (key, value)) + fp.close() + os.environ[key] = value + + def stop(self, service_name): + self.run('/etc/init.d/%s stop' % service_name) + + def start(self, service_name): + self.run('/etc/init.d/%s start' % service_name) + + def create_user(self, user): + """ + Create a user on the local system + """ + self.run("useradd -m %s" % user) + usr = getpwnam(user) + return usr + + def install(self): + """ + This is the only method you need to override + """ + raise NotImplementedError diff --git a/ext/boto/pyami/installers/ubuntu/mysql.py b/ext/boto/pyami/installers/ubuntu/mysql.py new file mode 100644 index 0000000000..5b0792ba9d --- /dev/null +++ b/ext/boto/pyami/installers/ubuntu/mysql.py @@ -0,0 +1,108 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This installer will install mysql-server on an Ubuntu machine. +In addition to the normal installation done by apt-get, it will +also configure the new MySQL server to store it's data files in +a different location. By default, this is /mnt but that can be +configured in the [MySQL] section of the boto config file passed +to the instance. +""" +from boto.pyami.installers.ubuntu.installer import Installer +import os +import boto +from boto.utils import ShellCommand +from boto.compat import ConfigParser +import time + +ConfigSection = """ +[MySQL] +root_password = +data_dir = +""" + +class MySQL(Installer): + + def install(self): + self.run('apt-get update') + self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True) + +# def set_root_password(self, password=None): +# if not password: +# password = boto.config.get('MySQL', 'root_password') +# if password: +# self.run('mysqladmin -u root password %s' % password) +# return password + + def change_data_dir(self, password=None): + data_dir = boto.config.get('MySQL', 'data_dir', '/mnt') + fresh_install = False + is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running + is_mysql_running_command.run() + if is_mysql_running_command.getStatus() == 0: + # mysql is running. This is the state apt-get will leave it in. 
If it isn't running, + # that means mysql was already installed on the AMI and there's no need to stop it, + # saving 40 seconds on instance startup. + time.sleep(10) #trying to stop mysql immediately after installing it fails + # We need to wait until mysql creates the root account before we kill it + # or bad things will happen + i = 0 + while self.run("echo 'quit' | mysql -u root") != 0 and i < 5: + time.sleep(5) + i = i + 1 + self.run('/etc/init.d/mysql stop') + self.run("pkill -9 mysql") + + mysql_path = os.path.join(data_dir, 'mysql') + if not os.path.exists(mysql_path): + self.run('mkdir %s' % mysql_path) + fresh_install = True + self.run('chown -R mysql:mysql %s' % mysql_path) + fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w') + fp.write('# created by pyami\n') + fp.write('# use the %s volume for data\n' % data_dir) + fp.write('[mysqld]\n') + fp.write('datadir = %s\n' % mysql_path) + fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log')) + fp.close() + if fresh_install: + self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path) + self.start('mysql') + else: + #get the password ubuntu expects to use: + config_parser = ConfigParser() + config_parser.read('/etc/mysql/debian.cnf') + password = config_parser.get('client', 'password') + # start the mysql deamon, then mysql with the required grant statement piped into it: + self.start('mysql') + time.sleep(10) #time for mysql to start + grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password + while self.run(grant_command) != 0: + time.sleep(5) + # leave mysqld running + + def main(self): + self.install() + # change_data_dir runs 'mysql -u root' which assumes there is no mysql password, i + # and changing that is too ugly to be worth it: + #self.set_root_password() + self.change_data_dir() diff --git a/ext/boto/pyami/installers/ubuntu/trac.py b/ext/boto/pyami/installers/ubuntu/trac.py new file mode 100644 index 0000000000..8c51c8f720 --- /dev/null +++ b/ext/boto/pyami/installers/ubuntu/trac.py @@ -0,0 +1,139 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
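Installer.add_cron above (ubuntu/installer.py) writes one file per job into /etc/cron.d using the standard seven-field layout. For example, the call the EBS installer makes for its backup job, repeated here as a sketch with the documented default minute and hour values:

    from boto.pyami.installers.ubuntu.installer import Installer

    installer = Installer()
    # Same call EBSInstaller.install() makes when backup_cron_minute
    # and backup_cron_hour are left at their defaults ('0' and '4,16').
    installer.add_cron('ebs_backup', '/usr/local/bin/ebs_backup',
                       minute='0', hour='4,16')
    # /etc/cron.d/ebs_backup then contains the single line:
    #   0 4,16 * * * root /usr/local/bin/ebs_backup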
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+import boto
+import os
+
+class Trac(Installer):
+    """
+    Install Trac and DAV-SVN
+    Sets up a Vhost pointing to [Trac]->home
+    using the config parameter [Trac]->hostname
+    Sets up a trac environment for every directory found under [Trac]->data_dir
+
+    [Trac]
+    name = My Foo Server
+    hostname = trac.foo.com
+    home = /mnt/sites/trac
+    data_dir = /mnt/trac
+    svn_dir = /mnt/subversion
+    server_admin = root@foo.com
+    sdb_auth_domain = users
+    # Optional
+    SSLCertificateFile = /mnt/ssl/foo.crt
+    SSLCertificateKeyFile = /mnt/ssl/foo.key
+    SSLCertificateChainFile = /mnt/ssl/FooCA.crt
+
+    """
+
+    def install(self):
+        self.run('apt-get -y install trac', notify=True, exit_on_error=True)
+        self.run('apt-get -y install libapache2-svn', notify=True, exit_on_error=True)
+        self.run("a2enmod ssl")
+        self.run("a2enmod mod_python")
+        self.run("a2enmod dav_svn")
+        self.run("a2enmod rewrite")
+        # Make sure that boto.log is writable by everyone so that subversion post-commit hooks can
+        # write to it.
+        self.run("touch /var/log/boto.log")
+        self.run("chmod a+w /var/log/boto.log")
+
+    def setup_vhost(self):
+        domain = boto.config.get("Trac", "hostname").strip()
+        if domain:
+            domain_info = domain.split('.')
+            cnf = open("/etc/apache2/sites-available/%s" % domain_info[0], "w")
+            cnf.write("NameVirtualHost *:80\n")
+            if boto.config.get("Trac", "SSLCertificateFile"):
+                cnf.write("NameVirtualHost *:443\n\n")
+                cnf.write("<VirtualHost *:80>\n")
+                cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+                cnf.write("\tServerName %s\n" % domain)
+                cnf.write("\tRewriteEngine On\n")
+                cnf.write("\tRewriteRule ^(.*)$ https://%s$1\n" % domain)
+                cnf.write("</VirtualHost>\n\n")
+
+                cnf.write("<VirtualHost *:443>\n")
+            else:
+                cnf.write("<VirtualHost *:80>\n")
+
+            cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
+            cnf.write("\tServerName %s\n" % domain)
+            cnf.write("\tDocumentRoot %s\n" % boto.config.get("Trac", "home").strip())
+
+            cnf.write("\t<Directory %s>\n" % boto.config.get("Trac", "home").strip())
+            cnf.write("\t\tOptions FollowSymLinks Indexes MultiViews\n")
+            cnf.write("\t\tAllowOverride All\n")
+            cnf.write("\t\tOrder allow,deny\n")
+            cnf.write("\t\tallow from all\n")
+            cnf.write("\t</Directory>\n")
+
+            cnf.write("\t<LocationMatch \"/trac/[^/]+/login\">\n")
+            cnf.write("\t\tAuthType Basic\n")
+            cnf.write("\t\tAuthName \"%s\"\n" % boto.config.get("Trac", "name"))
+            cnf.write("\t\tRequire valid-user\n")
+            cnf.write("\t\tAuthUserFile /mnt/apache/passwd/passwords\n")
+            cnf.write("\t</LocationMatch>\n")
+
+            data_dir = boto.config.get("Trac", "data_dir")
+            for env in os.listdir(data_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /trac/%s>\n" % env)
+                    cnf.write("\t\tSetHandler mod_python\n")
+                    cnf.write("\t\tPythonInterpreter main_interpreter\n")
+                    cnf.write("\t\tPythonHandler trac.web.modpython_frontend\n")
+                    cnf.write("\t\tPythonOption TracEnv %s/%s\n" % (data_dir, env))
+                    cnf.write("\t\tPythonOption TracUriRoot /trac/%s\n" % env)
+                    cnf.write("\t</Location>\n")
+
+            svn_dir = boto.config.get("Trac", "svn_dir")
+            for env in os.listdir(svn_dir):
+                if(env[0] != "."):
+                    cnf.write("\t<Location /svn/%s>\n" % env)
+                    cnf.write("\t\tDAV svn\n")
+                    cnf.write("\t\tSVNPath %s/%s\n" % (svn_dir, env))
+                    cnf.write("\t</Location>\n")
+
+            cnf.write("\tErrorLog /var/log/apache2/error.log\n")
+            cnf.write("\tLogLevel warn\n")
+            cnf.write("\tCustomLog /var/log/apache2/access.log combined\n")
+            cnf.write("\tServerSignature On\n")
+            SSLCertificateFile = boto.config.get("Trac", "SSLCertificateFile")
+            if SSLCertificateFile:
+                cnf.write("\tSSLEngine On\n")
+                cnf.write("\tSSLCertificateFile %s\n" % SSLCertificateFile)
+
+            SSLCertificateKeyFile = boto.config.get("Trac", "SSLCertificateKeyFile")
+            if SSLCertificateKeyFile:
+                cnf.write("\tSSLCertificateKeyFile %s\n" % SSLCertificateKeyFile)
+
+            SSLCertificateChainFile = boto.config.get("Trac", "SSLCertificateChainFile")
+            if SSLCertificateChainFile:
+                cnf.write("\tSSLCertificateChainFile %s\n" % SSLCertificateChainFile)
+            cnf.write("</VirtualHost>\n")
+            cnf.close()
+            self.run("a2ensite %s" % domain_info[0])
+            self.run("/etc/init.d/apache2 force-reload")
+
+    def main(self):
+        self.install()
+        self.setup_vhost()
diff --git a/ext/boto/pyami/launch_ami.py b/ext/boto/pyami/launch_ami.py
new file mode 100644
index 0000000000..9037217b61
--- /dev/null
+++ b/ext/boto/pyami/launch_ami.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt
+import sys
+import imp
+import time
+import boto
+
+usage_string = """
+SYNOPSIS
+    launch_ami.py -a ami_id [-b script_bucket] [-s script_name]
+                  [-m module] [-c class_name] [-r]
+                  [-g group] [-k key_name] [-n num_instances]
+                  [-w] [extra_data]
+    Where:
+        ami_id - the id of the AMI you wish to launch
+        module - The name of the Python module containing the class you
+                 want to run when the instance is started. If you use this
+                 option the Python module must already be stored on the
+                 instance in a location that is on the Python path.
+        script_file - The name of a local Python module that you would like
+                      to have copied to S3 and then run on the instance
+                      when it is started. The specified module must be
+                      import'able (i.e. in your local Python path). It
+                      will then be copied to the specified bucket in S3
+                      (see the -b option). Once the new instance(s)
+                      start up the script will be copied from S3 and then
+                      run locally on the instance.
+        class_name - The name of the class to be instantiated within the
+                     module or script file specified.
+        script_bucket - the name of the bucket in which the script will be
+                        stored
+        group - the name of the security group the instance will run in
+        key_name - the name of the keypair to use when launching the AMI
+        num_instances - how many instances of the AMI to launch (default 1)
+        input_queue_name - Name of SQS to read input messages from
+        output_queue_name - Name of SQS to write output messages to
+        extra_data - additional name-value pairs that will be passed as
+                     userdata to the newly launched instance.
These should + be of the form "name=value" + The -r option reloads the Python module to S3 without launching + another instance. This can be useful during debugging to allow + you to test a new version of your script without shutting down + your instance and starting up another one. + The -w option tells the script to run synchronously, meaning to + wait until the instance is actually up and running. It then prints + the IP address and internal and external DNS names before exiting. +""" + +def usage(): + print(usage_string) + sys.exit() + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w', + ['ami', 'bucket', 'class', 'group', 'help', + 'inputqueue', 'keypair', 'module', + 'numinstances', 'outputqueue', + 'reload', 'script_name', 'wait']) + except: + usage() + params = {'module_name': None, + 'script_name': None, + 'class_name': None, + 'script_bucket': None, + 'group': 'default', + 'keypair': None, + 'ami': None, + 'num_instances': 1, + 'input_queue_name': None, + 'output_queue_name': None} + reload = None + wait = None + for o, a in opts: + if o in ('-a', '--ami'): + params['ami'] = a + if o in ('-b', '--bucket'): + params['script_bucket'] = a + if o in ('-c', '--class'): + params['class_name'] = a + if o in ('-g', '--group'): + params['group'] = a + if o in ('-h', '--help'): + usage() + if o in ('-i', '--inputqueue'): + params['input_queue_name'] = a + if o in ('-k', '--keypair'): + params['keypair'] = a + if o in ('-m', '--module'): + params['module_name'] = a + if o in ('-n', '--num_instances'): + params['num_instances'] = int(a) + if o in ('-o', '--outputqueue'): + params['output_queue_name'] = a + if o in ('-r', '--reload'): + reload = True + if o in ('-s', '--script'): + params['script_name'] = a + if o in ('-w', '--wait'): + wait = True + + # check required fields + required = ['ami'] + for pname in required: + if not params.get(pname, None): + print('%s is required' % pname) + usage() + if params['script_name']: + # first copy the desired module file to S3 bucket + if reload: + print('Reloading module %s to S3' % params['script_name']) + else: + print('Copying module %s to S3' % params['script_name']) + l = imp.find_module(params['script_name']) + c = boto.connect_s3() + bucket = c.get_bucket(params['script_bucket']) + key = bucket.new_key(params['script_name'] + '.py') + key.set_contents_from_file(l[0]) + params['script_md5'] = key.md5 + # we have everything we need, now build userdata string + l = [] + for k, v in params.items(): + if v: + l.append('%s=%s' % (k, v)) + c = boto.connect_ec2() + l.append('aws_access_key_id=%s' % c.aws_access_key_id) + l.append('aws_secret_access_key=%s' % c.aws_secret_access_key) + for kv in args: + l.append(kv) + s = '|'.join(l) + if not reload: + rs = c.get_all_images([params['ami']]) + img = rs[0] + r = img.run(user_data=s, key_name=params['keypair'], + security_groups=[params['group']], + max_count=params.get('num_instances', 1)) + print('AMI: %s - %s (Started)' % (params['ami'], img.location)) + print('Reservation %s contains the following instances:' % r.id) + for i in r.instances: + print('\t%s' % i.id) + if wait: + running = False + while not running: + time.sleep(30) + [i.update() for i in r.instances] + status = [i.state for i in r.instances] + print(status) + if status.count('running') == len(r.instances): + running = True + for i in r.instances: + print('Instance: %s' % i.ami_launch_index) + print('Public DNS Name: %s' % i.public_dns_name) + print('Private DNS Name: %s' % i.private_dns_name) + +if 
__name__ == "__main__": + main() diff --git a/ext/boto/pyami/scriptbase.py b/ext/boto/pyami/scriptbase.py new file mode 100644 index 0000000000..d99a2b46e0 --- /dev/null +++ b/ext/boto/pyami/scriptbase.py @@ -0,0 +1,43 @@ +import os +import sys +from boto.utils import ShellCommand, get_ts +import boto +import boto.utils + +class ScriptBase(object): + + def __init__(self, config_file=None): + self.instance_id = boto.config.get('Instance', 'instance-id', 'default') + self.name = self.__class__.__name__ + self.ts = get_ts() + if config_file: + boto.config.read(config_file) + + def notify(self, subject, body=''): + boto.utils.notify(subject, body) + + def mkdir(self, path): + if not os.path.isdir(path): + try: + os.mkdir(path) + except: + boto.log.error('Error creating directory: %s' % path) + + def umount(self, path): + if os.path.ismount(path): + self.run('umount %s' % path) + + def run(self, command, notify=True, exit_on_error=False, cwd=None): + self.last_command = ShellCommand(command, cwd=cwd) + if self.last_command.status != 0: + boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output)) + if notify: + self.notify('Error encountered', + 'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \ + (command, self.last_command.output)) + if exit_on_error: + sys.exit(-1) + return self.last_command.status + + def main(self): + pass diff --git a/ext/boto/pyami/startup.py b/ext/boto/pyami/startup.py new file mode 100644 index 0000000000..4bd9dadd89 --- /dev/null +++ b/ext/boto/pyami/startup.py @@ -0,0 +1,60 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import sys +import boto +from boto.utils import find_class +from boto import config +from boto.pyami.scriptbase import ScriptBase + + +class Startup(ScriptBase): + + def run_scripts(self): + scripts = config.get('Pyami', 'scripts') + if scripts: + for script in scripts.split(','): + script = script.strip(" ") + try: + pos = script.rfind('.') + if pos > 0: + mod_name = script[0:pos] + cls_name = script[pos + 1:] + cls = find_class(mod_name, cls_name) + boto.log.info('Running Script: %s' % script) + s = cls() + s.main() + else: + boto.log.warning('Trouble parsing script: %s' % script) + except Exception as e: + boto.log.exception('Problem Running Script: %s. Startup process halting.' 
% script) + raise e + + def main(self): + self.run_scripts() + self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) + +if __name__ == "__main__": + if not config.has_section('loggers'): + boto.set_file_logger('startup', '/var/log/boto.log') + sys.path.append(config.get('Pyami', 'working_dir')) + su = Startup() + su.main() diff --git a/ext/boto/rds/__init__.py b/ext/boto/rds/__init__.py new file mode 100644 index 0000000000..9e9132c9ae --- /dev/null +++ b/ext/boto/rds/__init__.py @@ -0,0 +1,1622 @@ +# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import urllib +from boto.connection import AWSQueryConnection +from boto.rds.dbinstance import DBInstance +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.optiongroup import OptionGroup, OptionGroupOption +from boto.rds.parametergroup import ParameterGroup +from boto.rds.dbsnapshot import DBSnapshot +from boto.rds.event import Event +from boto.rds.regioninfo import RDSRegionInfo +from boto.rds.dbsubnetgroup import DBSubnetGroup +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.regioninfo import get_regions +from boto.regioninfo import connect +from boto.rds.logfile import LogFile, LogFileObject + + +def regions(): + """ + Get all available regions for the RDS service. + + :rtype: list + :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo` + """ + return get_regions( + 'rds', + region_cls=RDSRegionInfo, + connection_cls=RDSConnection + ) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.rds.RDSConnection`. + Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. 
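Startup.run_scripts above turns the comma-separated [Pyami] scripts option into classes via boto.utils.find_class and runs each one's main(). What it does for a single entry, unrolled as a sketch (the entry names the HelloWorld script from earlier in this patch):

    from boto.utils import find_class

    # One iteration of Startup.run_scripts(), written out by hand.
    script = 'boto.pyami.helloworld.HelloWorld'
    mod_name, cls_name = script.rsplit('.', 1)
    cls = find_class(mod_name, cls_name)  # imports the module, returns the class
    cls().main()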
+
+    :rtype: :class:`boto.rds.RDSConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+             name is given
+    """
+    return connect('rds', region_name, region_cls=RDSRegionInfo,
+                   connection_cls=RDSConnection, **kw_params)
+
+#boto.set_stream_logger('rds')
+
+
+class RDSConnection(AWSQueryConnection):
+
+    DefaultRegionName = 'us-east-1'
+    DefaultRegionEndpoint = 'rds.amazonaws.com'
+    APIVersion = '2013-05-15'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True,
+                 profile_name=None):
+        if not region:
+            region = RDSRegionInfo(self, self.DefaultRegionName,
+                                   self.DefaultRegionEndpoint)
+        self.region = region
+        super(RDSConnection, self).__init__(aws_access_key_id,
+                                            aws_secret_access_key,
+                                            is_secure, port, proxy, proxy_port,
+                                            proxy_user, proxy_pass,
+                                            self.region.endpoint, debug,
+                                            https_connection_factory, path,
+                                            security_token,
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    # DB Instance methods
+
+    def get_all_dbinstances(self, instance_id=None, max_records=None,
+                            marker=None):
+        """
+        Retrieve all the DBInstances in your account.
+
+        :type instance_id: str
+        :param instance_id: DB Instance identifier. If supplied, only
+                            information about this instance will be returned.
+                            Otherwise, info about all DB Instances will
+                            be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records. Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.dbinstance.DBInstance`
+        """
+        params = {}
+        if instance_id:
+            params['DBInstanceIdentifier'] = instance_id
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeDBInstances', params,
+                             [('DBInstance', DBInstance)])
+
+    def create_dbinstance(self,
+                          id,
+                          allocated_storage,
+                          instance_class,
+                          master_username,
+                          master_password,
+                          port=3306,
+                          engine='MySQL5.1',
+                          db_name=None,
+                          param_group=None,
+                          security_groups=None,
+                          availability_zone=None,
+                          preferred_maintenance_window=None,
+                          backup_retention_period=None,
+                          preferred_backup_window=None,
+                          multi_az=False,
+                          engine_version=None,
+                          auto_minor_version_upgrade=True,
+                          character_set_name = None,
+                          db_subnet_group_name = None,
+                          license_model = None,
+                          option_group_name = None,
+                          iops=None,
+                          vpc_security_groups=None,
+                          ):
+        # API version: 2013-09-09
+        # Parameter notes:
+        # =================
+        # id should be db_instance_identifier according to API docs but has been left
+        # id for backwards compatibility
+        #
+        # security_groups should be db_security_groups according to API docs but has been left
+        # security_groups for backwards compatibility
+        #
+        # master_password should be master_user_password according to API docs but has been left
+        # master_password for backwards compatibility
+        #
+        # instance_class should be db_instance_class according to API docs but has been left
+        # instance_class for backwards compatibility
+        """
+        Create a new DBInstance.
+
+        :type id: str
+        :param id: Unique identifier for the new instance.
+                   Must contain 1-63 alphanumeric characters.
+                   First character must be a letter.
+                   May not end with a hyphen or contain two consecutive hyphens
+
+        :type allocated_storage: int
+        :param allocated_storage: Initially allocated storage size, in GBs.
+                                  Valid values depend on the engine:
+
+                                  * MySQL = 5--3072
+                                  * oracle-se1 = 10--3072
+                                  * oracle-se = 10--3072
+                                  * oracle-ee = 10--3072
+                                  * sqlserver-ee = 200--1024
+                                  * sqlserver-se = 200--1024
+                                  * sqlserver-ex = 30--1024
+                                  * sqlserver-web = 30--1024
+                                  * postgres = 5--3072
+
+        :type instance_class: str
+        :param instance_class: The compute and memory capacity of
+                               the DBInstance. Valid values are:
+
+                               * db.t1.micro
+                               * db.m1.small
+                               * db.m1.medium
+                               * db.m1.large
+                               * db.m1.xlarge
+                               * db.m2.xlarge
+                               * db.m2.2xlarge
+                               * db.m2.4xlarge
+
+        :type engine: str
+        :param engine: Name of database engine. Defaults to MySQL but can be:
+
+                       * MySQL
+                       * oracle-se1
+                       * oracle-se
+                       * oracle-ee
+                       * sqlserver-ee
+                       * sqlserver-se
+                       * sqlserver-ex
+                       * sqlserver-web
+                       * postgres
+
+        :type master_username: str
+        :param master_username: Name of master user for the DBInstance.
+
+                                * MySQL must be:
+                                  - 1--16 alphanumeric characters
+                                  - first character must be a letter
+                                  - cannot be a reserved MySQL word
+
+                                * Oracle must be:
+                                  - 1--30 alphanumeric characters
+                                  - first character must be a letter
+                                  - cannot be a reserved Oracle word
+
+                                * SQL Server must be:
+                                  - 1--128 alphanumeric characters
+                                  - first character must be a letter
+                                  - cannot be a reserved SQL Server word
+
+        :type master_password: str
+        :param master_password: Password of master user for the DBInstance.
+
+                                * MySQL must be 8--41 alphanumeric characters
+
+                                * Oracle must be 8--30 alphanumeric characters
+
+                                * SQL Server must be 8--128 alphanumeric characters.
+
+        :type port: int
+        :param port: Port number on which database accepts connections.
+                     Valid values [1115-65535].
+
+                     * MySQL defaults to 3306
+
+                     * Oracle defaults to 1521
+
+                     * SQL Server defaults to 1433 and _cannot_ be 1434, 3389,
+                       47001, 49152, and 49152 through 49156.
+
+                     * PostgreSQL defaults to 5432
+
+        :type db_name: str
+        :param db_name: * MySQL:
+                          Name of a database to create when the DBInstance
+                          is created. Default is to create no databases.
+
+                          Must contain 1--64 alphanumeric characters and cannot
+                          be a reserved MySQL word.
+
+                        * Oracle:
+                          The Oracle System ID (SID) of the created DB instances.
+                          Default is ORCL. Cannot be longer than 8 characters.
+
+                        * SQL Server:
+                          Not applicable and must be None.
+
+                        * PostgreSQL:
+                          Name of a database to create when the DBInstance
+                          is created. Default is to create no databases.
+
+                          Must contain 1--63 alphanumeric characters. Must
+                          begin with a letter or an underscore. Subsequent
+                          characters can be letters, underscores, or digits (0-9)
+                          and cannot be a reserved PostgreSQL word.
+
+        :type param_group: str or ParameterGroup object
+        :param param_group: Name of DBParameterGroup or ParameterGroup instance
+                            to associate with this DBInstance. If no groups are
+                            specified no parameter groups will be used.
+
+        :type security_groups: list of str or list of DBSecurityGroup objects
+        :param security_groups: List of names of DBSecurityGroup to
+                                authorize on this DBInstance.
+
+        :type availability_zone: str
+        :param availability_zone: Name of the availability zone to place
+                                  DBInstance into.
+
+        :type preferred_maintenance_window: str
+        :param preferred_maintenance_window: The weekly time range (in UTC)
+                                             during which maintenance can occur.
+                                             Default is Sun:05:00-Sun:09:00
+
+        :type backup_retention_period: int
+        :param backup_retention_period: The number of days for which automated
+                                        backups are retained. Setting this to
+                                        zero disables automated backups.
+
+        :type preferred_backup_window: str
+        :param preferred_backup_window: The daily time range during which
+                                        automated backups are created (if
+                                        enabled). Must be in hh24:mi-hh24:mi
+                                        format (UTC).
+
+        :type multi_az: bool
+        :param multi_az: If True, specifies the DB Instance will be
+                         deployed in multiple availability zones.
+
+                         For Microsoft SQL Server, must be set to false. You cannot set
+                         the AvailabilityZone parameter if the MultiAZ parameter is
+                         set to true.
+
+        :type engine_version: str
+        :param engine_version: The version number of the database engine to use.
+
+                               * MySQL format example: 5.1.42
+
+                               * Oracle format example: 11.2.0.2.v2
+
+                               * SQL Server format example: 10.50.2789.0.v1
+
+                               * PostgreSQL format example: 9.3
+
+        :type auto_minor_version_upgrade: bool
+        :param auto_minor_version_upgrade: Indicates that minor engine
+                                           upgrades will be applied
+                                           automatically to the Read Replica
+                                           during the maintenance window.
+                                           Default is True.
+        :type character_set_name: str
+        :param character_set_name: For supported engines, indicates that the DB Instance
+                                   should be associated with the specified CharacterSet.
+
+        :type db_subnet_group_name: str
+        :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
+                                     If there is no DB Subnet Group, then it is a non-VPC DB
+                                     instance.
+
+        :type license_model: str
+        :param license_model: License model information for this DB Instance.
+
+                              Valid values are:
+                              - license-included
+                              - bring-your-own-license
+                              - general-public-license
+
+                              Not all license types are supported on all engines.
+
+        :type option_group_name: str
+        :param option_group_name: Indicates that the DB Instance should be associated
+                                  with the specified option group.
+
+        :type iops: int
+        :param iops: The amount of IOPS (input/output operations per second) to provision
+                     for the DB Instance. Can be modified at a later date.
+
+                     Must scale linearly. For every 1000 IOPS provisioned, you must allocate
+                     100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL
+                     and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS.
+
+                     If you specify a value, it must be at least 1000 IOPS and you must
+                     allocate 100 GB of storage.
+
+        :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object
+        :param vpc_security_groups: List of VPC security group ids or a list of
+                                    VPCSecurityGroupMembership objects this DBInstance should be a member of
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The new db instance.
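A usage sketch for create_dbinstance; every value below is a placeholder, only the keyword names and defaults come from the signature above:

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    # Smallest useful call: the five required arguments. Engine, port
    # and the rest fall back to the defaults documented above.
    db = conn.create_dbinstance(id='example-db',
                                allocated_storage=10,
                                instance_class='db.m1.small',
                                master_username='exampleuser',
                                master_password='examplepass1')
    print(db.status)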
+ """ + # boto argument alignment with AWS API parameter names: + # ===================================================== + # arg => AWS parameter + # allocated_storage => AllocatedStorage + # auto_minor_version_update => AutoMinorVersionUpgrade + # availability_zone => AvailabilityZone + # backup_retention_period => BackupRetentionPeriod + # character_set_name => CharacterSetName + # db_instance_class => DBInstanceClass + # db_instance_identifier => DBInstanceIdentifier + # db_name => DBName + # db_parameter_group_name => DBParameterGroupName + # db_security_groups => DBSecurityGroups.member.N + # db_subnet_group_name => DBSubnetGroupName + # engine => Engine + # engine_version => EngineVersion + # license_model => LicenseModel + # master_username => MasterUsername + # master_user_password => MasterUserPassword + # multi_az => MultiAZ + # option_group_name => OptionGroupName + # port => Port + # preferred_backup_window => PreferredBackupWindow + # preferred_maintenance_window => PreferredMaintenanceWindow + # vpc_security_groups => VpcSecurityGroupIds.member.N + params = { + 'AllocatedStorage': allocated_storage, + 'AutoMinorVersionUpgrade': str(auto_minor_version_upgrade).lower() if auto_minor_version_upgrade else None, + 'AvailabilityZone': availability_zone, + 'BackupRetentionPeriod': backup_retention_period, + 'CharacterSetName': character_set_name, + 'DBInstanceClass': instance_class, + 'DBInstanceIdentifier': id, + 'DBName': db_name, + 'DBParameterGroupName': (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group), + 'DBSubnetGroupName': db_subnet_group_name, + 'Engine': engine, + 'EngineVersion': engine_version, + 'Iops': iops, + 'LicenseModel': license_model, + 'MasterUsername': master_username, + 'MasterUserPassword': master_password, + 'MultiAZ': str(multi_az).lower() if multi_az else None, + 'OptionGroupName': option_group_name, + 'Port': port, + 'PreferredBackupWindow': preferred_backup_window, + 'PreferredMaintenanceWindow': preferred_maintenance_window, + } + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + + # Remove any params set to None + for k, v in list(params.items()): + if v is None: del(params[k]) + + return self.get_object('CreateDBInstance', params, DBInstance) + + def create_dbinstance_read_replica(self, id, source_id, + instance_class=None, + port=3306, + availability_zone=None, + auto_minor_version_upgrade=None): + """ + Create a new DBInstance Read Replica. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type source_id: str + :param source_id: Unique identifier for the DB Instance for which this + DB Instance will act as a Read Replica. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Default is to inherit from + the source DB Instance. 
+ + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Default is to inherit from source DB Instance. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is to inherit this value + from the source DB Instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + params = {'DBInstanceIdentifier': id, + 'SourceDBInstanceIdentifier': source_id} + if instance_class: + params['DBInstanceClass'] = instance_class + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if auto_minor_version_upgrade is not None: + if auto_minor_version_upgrade is True: + params['AutoMinorVersionUpgrade'] = 'true' + else: + params['AutoMinorVersionUpgrade'] = 'false' + + return self.get_object('CreateDBInstanceReadReplica', + params, DBInstance) + + + def promote_read_replica(self, id, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promote a Read Replica to a standalone DB Instance. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. + """ + params = {'DBInstanceIdentifier': id} + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + + return self.get_object('PromoteReadReplica', params, DBInstance) + + + def modify_dbinstance(self, id, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + apply_immediately=False, + iops=None, + vpc_security_groups=None, + new_instance_id=None, + ): + """ + Modify an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type param_group: str or ParameterGroup object + :param param_group: Name of DBParameterGroup or ParameterGroup instance + to associate with this DBInstance. If no groups are + specified no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to authorize on + this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) + during which maintenance can + occur. 
+                                             Default is Sun:05:00-Sun:09:00
+
+        :type master_password: str
+        :param master_password: Password of master user for the DBInstance.
+                                Must be 4-15 alphanumeric characters.
+
+        :type allocated_storage: int
+        :param allocated_storage: The new allocated storage size, in GBs.
+                                  Valid values are [5-1024]
+
+        :type instance_class: str
+        :param instance_class: The compute and memory capacity of the
+                               DBInstance. Changes will be applied at
+                               next maintenance window unless
+                               apply_immediately is True.
+
+                               Valid values are:
+
+                               * db.m1.small
+                               * db.m1.large
+                               * db.m1.xlarge
+                               * db.m2.xlarge
+                               * db.m2.2xlarge
+                               * db.m2.4xlarge
+
+        :type apply_immediately: bool
+        :param apply_immediately: If true, the modifications will be applied
+                                  as soon as possible rather than waiting for
+                                  the next preferred maintenance window.
+
+        :type backup_retention_period: int
+        :param backup_retention_period: The number of days for which automated
+                                        backups are retained. Setting this to
+                                        zero disables automated backups.
+
+        :type preferred_backup_window: str
+        :param preferred_backup_window: The daily time range during which
+                                        automated backups are created (if
+                                        enabled). Must be in hh24:mi-hh24:mi
+                                        format (UTC).
+
+        :type multi_az: bool
+        :param multi_az: If True, specifies the DB Instance will be
+                         deployed in multiple availability zones.
+
+        :type iops: int
+        :param iops: The amount of IOPS (input/output operations per second) to provision
+                     for the DB Instance. Can be modified at a later date.
+
+                     Must scale linearly. For every 1000 IOPS provisioned, you must allocate
+                     100 GB of storage space. This scales up to 1 TB / 10 000 IOPS for MySQL
+                     and Oracle. MSSQL is limited to 700 GB / 7 000 IOPS.
+
+                     If you specify a value, it must be at least 1000 IOPS and you must
+                     allocate 100 GB of storage.
+
+        :type vpc_security_groups: list of str or a VPCSecurityGroupMembership object
+        :param vpc_security_groups: List of VPC security group ids or a
+                                    VPCSecurityGroupMembership object this DBInstance should be a member of
+
+        :type new_instance_id: str
+        :param new_instance_id: New name to rename the DBInstance to.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The modified db instance.
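The matching sketch for modify_dbinstance, again with placeholder values; apply_immediately=True skips the wait for the preferred maintenance window described above:

    import boto.rds

    conn = boto.rds.connect_to_region('us-east-1')
    # Grow storage on an existing instance right away.
    db = conn.modify_dbinstance('example-db',
                                allocated_storage=20,
                                apply_immediately=True)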
+ """ + params = {'DBInstanceIdentifier': id} + if param_group: + params['DBParameterGroupName'] = (param_group.name + if isinstance(param_group, ParameterGroup) + else param_group) + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + if vpc_security_groups: + l = [] + for vpc_grp in vpc_security_groups: + if isinstance(vpc_grp, VPCSecurityGroupMembership): + l.append(vpc_grp.vpc_group) + else: + l.append(vpc_grp) + self.build_list_params(params, l, 'VpcSecurityGroupIds.member') + if preferred_maintenance_window: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if master_password: + params['MasterUserPassword'] = master_password + if allocated_storage: + params['AllocatedStorage'] = allocated_storage + if instance_class: + params['DBInstanceClass'] = instance_class + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + if multi_az: + params['MultiAZ'] = 'true' + if apply_immediately: + params['ApplyImmediately'] = 'true' + if iops: + params['Iops'] = iops + if new_instance_id: + params['NewDBInstanceIdentifier'] = new_instance_id + + return self.get_object('ModifyDBInstance', params, DBInstance) + + def delete_dbinstance(self, id, skip_final_snapshot=False, + final_snapshot_id=''): + """ + Delete an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether a final + db snapshot is created before the instance + is deleted. If True, no snapshot + is created. If False, a snapshot + is created before deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + params = {'DBInstanceIdentifier': id} + if skip_final_snapshot: + params['SkipFinalSnapshot'] = 'true' + else: + params['SkipFinalSnapshot'] = 'false' + params['FinalDBSnapshotIdentifier'] = final_snapshot_id + return self.get_object('DeleteDBInstance', params, DBInstance) + + def reboot_dbinstance(self, id): + """ + Reboot DBInstance. + + :type id: str + :param id: Unique identifier of the instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The rebooting db instance. + """ + params = {'DBInstanceIdentifier': id} + return self.get_object('RebootDBInstance', params, DBInstance) + + # DBParameterGroup methods + + def get_all_dbparameter_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all parameter groups associated with your account in a region. + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + If not provided, all DBParameter groups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. 
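A hedged sketch of the lifecycle methods above (modify, reboot, delete), again with placeholder identifiers:

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

# Grow storage now instead of waiting for the maintenance window.
db = conn.modify_dbinstance('mydb', allocated_storage=50,
                            apply_immediately=True)
conn.reboot_dbinstance('mydb')

# Keep a final snapshot when deleting an instance.
conn.delete_dbinstance('olddb', skip_final_snapshot=False,
                       final_snapshot_id='olddb-final')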
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.parametergroup.ParameterGroup`
+        """
+        params = {}
+        if groupname:
+            params['DBParameterGroupName'] = groupname
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeDBParameterGroups', params,
+                             [('DBParameterGroup', ParameterGroup)])
+
+    def get_all_dbparameters(self, groupname, source=None,
+                             max_records=None, marker=None):
+        """
+        Get all parameters associated with a ParameterGroup.
+
+        :type groupname: str
+        :param groupname: The name of the DBParameter group to retrieve.
+
+        :type source: str
+        :param source: Specifies which parameters to return.
+                       If not specified, all parameters will be returned.
+                       Valid values are: user|system|engine-default
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records. Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
+        :return: The ParameterGroup
+        """
+        params = {'DBParameterGroupName': groupname}
+        if source:
+            params['Source'] = source
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        pg = self.get_object('DescribeDBParameters', params, ParameterGroup)
+        pg.name = groupname
+        return pg
+
+    def create_parameter_group(self, name, engine='MySQL5.1', description=''):
+        """
+        Create a new dbparameter group for your account.
+
+        :type name: string
+        :param name: The name of the new dbparameter group
+
+        :type engine: str
+        :param engine: Name of database engine.
+
+        :type description: string
+        :param description: The description of the new dbparameter group
+
+        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
+        :return: The newly created ParameterGroup
+        """
+        params = {'DBParameterGroupName': name,
+                  'DBParameterGroupFamily': engine,
+                  'Description': description}
+        return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
+
+    def modify_parameter_group(self, name, parameters=None):
+        """
+        Modify a ParameterGroup for your account.
+
+        :type name: string
+        :param name: The name of the ParameterGroup to modify
+
+        :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
+        :param parameters: The new parameters
+
+        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
+        :return: The modified ParameterGroup
+        """
+        params = {'DBParameterGroupName': name}
+        for i in range(0, len(parameters)):
+            parameter = parameters[i]
+            parameter.merge(params, i+1)
+        return self.get_list('ModifyDBParameterGroup', params,
+                             ParameterGroup, verb='POST')
+
+    def reset_parameter_group(self, name, reset_all_params=False,
+                              parameters=None):
+        """
+        Resets some or all of the parameters of a ParameterGroup to the
+        default value
+
+        :type name: string
+        :param name: The name of the ParameterGroup to reset
+
+        :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
+        :param parameters: The parameters to reset. If not supplied,
+                           all parameters will be reset.
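The parameter group methods above compose as follows. A sketch that assumes ParameterGroup's dict-style access to Parameter objects (as in upstream boto); group and parameter names are illustrative.

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

pg = conn.get_all_dbparameters('mygroup')  # ParameterGroup is dict-like
param = pg['max_connections']
param.value = 256
conn.modify_parameter_group('mygroup', parameters=[param])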
+        """
+        params = {'DBParameterGroupName': name}
+        if reset_all_params:
+            params['ResetAllParameters'] = 'true'
+        else:
+            params['ResetAllParameters'] = 'false'
+        if parameters:
+            for i in range(0, len(parameters)):
+                parameter = parameters[i]
+                parameter.merge(params, i+1)
+        return self.get_status('ResetDBParameterGroup', params)
+
+    def delete_parameter_group(self, name):
+        """
+        Delete a ParameterGroup from your account.
+
+        :type name: string
+        :param name: The name of the ParameterGroup to delete
+        """
+        params = {'DBParameterGroupName': name}
+        return self.get_status('DeleteDBParameterGroup', params)
+
+    # DBSecurityGroup methods
+
+    def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
+                                  marker=None):
+        """
+        Get all security groups associated with your account in a region.
+
+        :type groupname: str
+        :param groupname: The name of the security group to retrieve.
+                          If not provided, all security groups will
+                          be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records. Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
+        """
+        params = {}
+        if groupname:
+            params['DBSecurityGroupName'] = groupname
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeDBSecurityGroups', params,
+                             [('DBSecurityGroup', DBSecurityGroup)])
+
+    def create_dbsecurity_group(self, name, description=None):
+        """
+        Create a new security group for your account.
+        This will create the security group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new security group
+
+        :type description: string
+        :param description: The description of the new security group
+
+        :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
+        :return: The newly created DBSecurityGroup
+        """
+        params = {'DBSecurityGroupName': name}
+        if description:
+            params['DBSecurityGroupDescription'] = description
+        group = self.get_object('CreateDBSecurityGroup', params,
+                                DBSecurityGroup)
+        group.name = name
+        group.description = description
+        return group
+
+    def delete_dbsecurity_group(self, name):
+        """
+        Delete a DBSecurityGroup from your account.
+
+        :type name: string
+        :param name: The name of the DBSecurityGroup to delete
+        """
+        params = {'DBSecurityGroupName': name}
+        return self.get_status('DeleteDBSecurityGroup', params)
+
+    def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
+                                   ec2_security_group_name=None,
+                                   ec2_security_group_owner_id=None):
+        """
+        Add a new rule to an existing security group.
+        You need to pass in either ec2_security_group_name and
+        ec2_security_group_owner_id OR a CIDR block but not both.
+
+        :type group_name: string
+        :param group_name: The name of the security group you are adding
+                           the rule to.
+
+        :type ec2_security_group_name: string
+        :param ec2_security_group_name: The name of the EC2 security group
+                                        you are granting access to.
+
+        :type ec2_security_group_owner_id: string
+        :param ec2_security_group_owner_id: The ID of the owner of the EC2
+                                            security group you are granting
+                                            access to.
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are providing access to.
+ See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = urllib.quote(cidr_ip) + return self.get_object('AuthorizeDBSecurityGroupIngress', params, + DBSecurityGroup) + + def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None, + ec2_security_group_owner_id=None, cidr_ip=None): + """ + Remove an existing rule from an existing security group. + You need to pass in either ec2_security_group_name and + ec2_security_group_owner_id OR a CIDR block. + + :type group_name: string + :param group_name: The name of the security group you are removing + the rule from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group + from which you are removing access. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 + security from which you are + removing access. + + :type cidr_ip: string + :param cidr_ip: The CIDR block from which you are removing access. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName': group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = cidr_ip + return self.get_object('RevokeDBSecurityGroupIngress', params, + DBSecurityGroup) + + # For backwards compatibility. This method was improperly named + # in previous versions. I have renamed it to match the others. + revoke_security_group = revoke_dbsecurity_group + + # DBSnapshot methods + + def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None, + max_records=None, marker=None): + """ + Get information about DB Snapshots. + + :type snapshot_id: str + :param snapshot_id: The unique identifier of an RDS snapshot. + If not provided, all RDS snapshots will be returned. + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. If provided, + only the DBSnapshots related to that instance will + be returned. + If not provided, all RDS snapshots will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot` + """ + params = {} + if snapshot_id: + params['DBSnapshotIdentifier'] = snapshot_id + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSnapshots', params, + [('DBSnapshot', DBSnapshot)]) + + def get_all_logs(self, dbinstance_id, max_records=None, marker=None, file_size=None, filename_contains=None, file_last_written=None): + """ + Get all log files + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. 
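A short sketch of the security group and snapshot listing calls above; the CIDR block and identifiers are placeholders.

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

conn.create_dbsecurity_group('webdb', 'Web tier access')
conn.authorize_dbsecurity_group('webdb', cidr_ip='203.0.113.0/24')
conn.revoke_dbsecurity_group('webdb', cidr_ip='203.0.113.0/24')

# List only the snapshots belonging to one instance.
for snap in conn.get_all_dbsnapshots(instance_id='mydb'):
    print(snap.id, snap.status)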
+
+        :type max_records: int
+        :param max_records: Number of log file names to return.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :type file_size: int
+        :param file_size: Filter results to files larger than this size in bytes.
+
+        :type filename_contains: str
+        :param filename_contains: Filter results to files with filename containing this string
+
+        :type file_last_written: int
+        :param file_last_written: Filter results to files written after this time (POSIX timestamp)
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.logfile.LogFile`
+        """
+        params = {'DBInstanceIdentifier': dbinstance_id}
+
+        if file_size:
+            params['FileSize'] = file_size
+
+        if filename_contains:
+            params['FilenameContains'] = filename_contains
+
+        if file_last_written:
+            params['FileLastWritten'] = file_last_written
+
+        if marker:
+            params['Marker'] = marker
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        return self.get_list('DescribeDBLogFiles', params,
+                             [('DescribeDBLogFilesDetails', LogFile)])
+
+    def get_log_file(self, dbinstance_id, log_file_name, marker=None, number_of_lines=None, max_records=None):
+        """
+        Download a log file from RDS
+
+        :type dbinstance_id: str
+        :param dbinstance_id: The identifier of a DBInstance.
+
+        :type log_file_name: str
+        :param log_file_name: The name of the log file to retrieve
+
+        :type marker: str
+        :param marker: A marker returned from a previous call to this method, or 0 to indicate the start of file. If
+                       no marker is specified, this will fetch log lines from the end of file instead.
+
+        :type number_of_lines: int
+        :param number_of_lines: The maximum number of lines to be returned.
+        """
+
+        params = {
+            'DBInstanceIdentifier': dbinstance_id,
+            'LogFileName': log_file_name,
+        }
+
+        if marker:
+            params['Marker'] = marker
+
+        if number_of_lines:
+            params['NumberOfLines'] = number_of_lines
+
+        if max_records:
+            params['MaxRecords'] = max_records
+
+        logfile = self.get_object('DownloadDBLogFilePortion', params, LogFileObject)
+
+        if logfile:
+            logfile.log_filename = log_file_name
+            logfile.dbinstance_id = dbinstance_id
+
+        return logfile
+
+    def create_dbsnapshot(self, snapshot_id, dbinstance_id):
+        """
+        Create a new DB snapshot.
+
+        :type snapshot_id: string
+        :param snapshot_id: The identifier for the DBSnapshot
+
+        :type dbinstance_id: string
+        :param dbinstance_id: The source identifier for the RDS instance from
+                              which the snapshot is created.
+
+        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+        :return: The newly created DBSnapshot
+        """
+        params = {'DBSnapshotIdentifier': snapshot_id,
+                  'DBInstanceIdentifier': dbinstance_id}
+        return self.get_object('CreateDBSnapshot', params, DBSnapshot)
+
+    def copy_dbsnapshot(self, source_snapshot_id, target_snapshot_id):
+        """
+        Copies the specified DBSnapshot.
+
+        :type source_snapshot_id: string
+        :param source_snapshot_id: The identifier for the source DB snapshot.
+
+        :type target_snapshot_id: string
+        :param target_snapshot_id: The identifier for the copied snapshot.
+
+        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+        :return: The newly created DBSnapshot.
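The two log methods above pair naturally: DescribeDBLogFiles enumerates files, DownloadDBLogFilePortion fetches their contents. A sketch with placeholder identifiers; the data attribute is populated from the LogFileData element (see logfile.py later in this patch).

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

for lf in conn.get_all_logs('mydb', filename_contains='error'):
    portion = conn.get_log_file('mydb', lf.log_filename,
                                number_of_lines=100)
    print(portion.data)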
+ """ + params = {'SourceDBSnapshotIdentifier': source_snapshot_id, + 'TargetDBSnapshotIdentifier': target_snapshot_id} + return self.get_object('CopyDBSnapshot', params, DBSnapshot) + + def delete_dbsnapshot(self, identifier): + """ + Delete a DBSnapshot + + :type identifier: string + :param identifier: The identifier of the DBSnapshot to delete + """ + params = {'DBSnapshotIdentifier': identifier} + return self.get_object('DeleteDBSnapshot', params, DBSnapshot) + + def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id, + instance_class, port=None, + availability_zone=None, + multi_az=None, + auto_minor_version_upgrade=None, + db_subnet_group_name=None): + """ + Create a new DBInstance from a DB snapshot. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :type instance_id: string + :param instance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + Default is the API default. + + :type auto_minor_version_upgrade: bool + :param auto_minor_version_upgrade: Indicates that minor engine + upgrades will be applied + automatically to the Read Replica + during the maintenance window. + Default is the API default. + + :type db_subnet_group_name: str + :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance. + If there is no DB Subnet Group, then it is a non-VPC DB + instance. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'DBSnapshotIdentifier': identifier, + 'DBInstanceIdentifier': instance_id, + 'DBInstanceClass': instance_class} + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + if multi_az is not None: + params['MultiAZ'] = str(multi_az).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str(auto_minor_version_upgrade).lower() + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + return self.get_object('RestoreDBInstanceFromDBSnapshot', + params, DBInstance) + + def restore_dbinstance_from_point_in_time(self, source_instance_id, + target_instance_id, + use_latest=False, + restore_time=None, + dbinstance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None): + + """ + Create a new DBInstance from a point in time. + + :type source_instance_id: string + :param source_instance_id: The identifier for the source DBInstance. + + :type target_instance_id: string + :param target_instance_id: The identifier of the new DBInstance. + + :type use_latest: bool + :param use_latest: If True, the latest snapshot availabile will + be used. + + :type restore_time: datetime + :param restore_time: The date and time to restore from. Only + used if use_latest is False. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. 
Valid values are:
+                               db.m1.small | db.m1.large | db.m1.xlarge |
+                               db.m2.2xlarge | db.m2.4xlarge
+
+        :type port: int
+        :param port: Port number on which database accepts connections.
+                     Valid values [1115-65535]. Defaults to 3306.
+
+        :type availability_zone: str
+        :param availability_zone: Name of the availability zone to place
+                                  DBInstance into.
+
+        :type db_subnet_group_name: str
+        :param db_subnet_group_name: A DB Subnet Group to associate with this DB Instance.
+                                     If there is no DB Subnet Group, then it is a non-VPC DB
+                                     instance.
+
+        :rtype: :class:`boto.rds.dbinstance.DBInstance`
+        :return: The newly created DBInstance
+        """
+        params = {'SourceDBInstanceIdentifier': source_instance_id,
+                  'TargetDBInstanceIdentifier': target_instance_id}
+        if use_latest:
+            params['UseLatestRestorableTime'] = 'true'
+        elif restore_time:
+            params['RestoreTime'] = restore_time.isoformat()
+        if dbinstance_class:
+            params['DBInstanceClass'] = dbinstance_class
+        if port:
+            params['Port'] = port
+        if availability_zone:
+            params['AvailabilityZone'] = availability_zone
+        if db_subnet_group_name is not None:
+            params['DBSubnetGroupName'] = db_subnet_group_name
+        return self.get_object('RestoreDBInstanceToPointInTime',
+                               params, DBInstance)
+
+    # Events
+
+    def get_all_events(self, source_identifier=None, source_type=None,
+                       start_time=None, end_time=None,
+                       max_records=None, marker=None):
+        """
+        Get information about events related to your DBInstances,
+        DBSecurityGroups and DBParameterGroups.
+
+        :type source_identifier: str
+        :param source_identifier: If supplied, the events returned will be
+                                  limited to those that apply to the identified
+                                  source. The value of this parameter depends
+                                  on the value of source_type. If neither
+                                  parameter is specified, all events in the time
+                                  span will be returned.
+
+        :type source_type: str
+        :param source_type: Specifies how the source_identifier should
+                            be interpreted. Valid values are:
+                            db-instance | db-security-group |
+                            db-parameter-group | db-snapshot
+
+        :type start_time: datetime
+        :param start_time: The beginning of the time interval for events.
+                           If not supplied, all available events will
+                           be returned.
+
+        :type end_time: datetime
+        :param end_time: The ending of the time interval for events.
+                         If not supplied, all available events will
+                         be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a MoreToken will
+                            be returned in the response that can be used to
+                            retrieve additional records. Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.event.Event`
+        """
+        params = {}
+        if source_identifier and source_type:
+            params['SourceIdentifier'] = source_identifier
+            params['SourceType'] = source_type
+        if start_time:
+            params['StartTime'] = start_time.isoformat()
+        if end_time:
+            params['EndTime'] = end_time.isoformat()
+        if max_records:
+            params['MaxRecords'] = max_records
+        if marker:
+            params['Marker'] = marker
+        return self.get_list('DescribeEvents', params, [('Event', Event)])
+
+    def create_db_subnet_group(self, name, desc, subnet_ids):
+        """
+        Create a new Database Subnet Group.
+
+        :type name: string
+        :param name: The identifier for the db_subnet_group
+
+        :type desc: string
+        :param desc: A description of the db_subnet_group
+
+        :type subnet_ids: list
+        :param subnet_ids: A list of the subnet identifiers to include in the
+                           db_subnet_group
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: the created db_subnet_group
+        """
+
+        params = {'DBSubnetGroupName': name,
+                  'DBSubnetGroupDescription': desc}
+        self.build_list_params(params, subnet_ids, 'SubnetIds.member')
+
+        return self.get_object('CreateDBSubnetGroup', params, DBSubnetGroup)
+
+    def delete_db_subnet_group(self, name):
+        """
+        Delete a Database Subnet Group.
+
+        :type name: string
+        :param name: The identifier of the db_subnet_group to delete
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: The deleted db_subnet_group.
+        """
+
+        params = {'DBSubnetGroupName': name}
+
+        return self.get_object('DeleteDBSubnetGroup', params, DBSubnetGroup)
+
+
+    def get_all_db_subnet_groups(self, name=None, max_records=None, marker=None):
+        """
+        Retrieve all the DBSubnetGroups in your account.
+
+        :type name: str
+        :param name: DBSubnetGroup name. If supplied, only information about
+                     this DBSubnetGroup will be returned. Otherwise, info
+                     about all DBSubnetGroups will be returned.
+
+        :type max_records: int
+        :param max_records: The maximum number of records to be returned.
+                            If more results are available, a Token will be
+                            returned in the response that can be used to
+                            retrieve additional records. Default is 100.
+
+        :type marker: str
+        :param marker: The marker provided by a previous request.
+
+        :rtype: list
+        :return: A list of :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        """
+        params = dict()
+        if name is not None:
+            params['DBSubnetGroupName'] = name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+
+        return self.get_list('DescribeDBSubnetGroups', params, [('DBSubnetGroup', DBSubnetGroup)])
+
+    def modify_db_subnet_group(self, name, description=None, subnet_ids=None):
+        """
+        Modify an existing Database Subnet Group.
+
+        :type name: string
+        :param name: The name of the subnet group to modify
+
+        :type description: string
+        :param description: A new description for the subnet group
+
+        :type subnet_ids: list
+        :param subnet_ids: A new list of subnet identifiers for the group
+
+        :rtype: :class:`boto.rds.dbsubnetgroup.DBSubnetGroup`
+        :return: The modified db_subnet_group
+        """
+        params = {'DBSubnetGroupName': name}
+        if description is not None:
+            params['DBSubnetGroupDescription'] = description
+        if subnet_ids is not None:
+            self.build_list_params(params, subnet_ids, 'SubnetIds.member')
+
+        return self.get_object('ModifyDBSubnetGroup', params, DBSubnetGroup)
+
+    def create_option_group(self, name, engine_name, major_engine_version,
+                            description=None):
+        """
+        Create a new option group for your account.
+        This will create the option group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new option group
+
+        :type engine_name: string
+        :param engine_name: Specifies the name of the engine that this option
+                            group should be associated with.
+
+        :type major_engine_version: string
+        :param major_engine_version: Specifies the major version of the engine
+                                     that this option group should be
+                                     associated with.
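A sketch of the subnet group calls above; the subnet IDs are placeholders.

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

conn.create_db_subnet_group('my-subnets', 'Spans two AZs',
                            ['subnet-11111111', 'subnet-22222222'])
conn.modify_db_subnet_group('my-subnets',
                            subnet_ids=['subnet-11111111',
                                        'subnet-33333333'])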
+ + :type description: string + :param description: The description of the new option group + + :rtype: :class:`boto.rds.optiongroup.OptionGroup` + :return: The newly created OptionGroup + """ + params = { + 'OptionGroupName': name, + 'EngineName': engine_name, + 'MajorEngineVersion': major_engine_version, + 'OptionGroupDescription': description, + } + group = self.get_object('CreateOptionGroup', params, OptionGroup) + group.name = name + group.engine_name = engine_name + group.major_engine_version = major_engine_version + group.description = description + return group + + def delete_option_group(self, name): + """ + Delete an OptionGroup from your account. + + :type key_name: string + :param key_name: The name of the OptionGroup to delete + """ + params = {'OptionGroupName': name} + return self.get_status('DeleteOptionGroup', params) + + def describe_option_groups(self, name=None, engine_name=None, + major_engine_version=None, max_records=100, + marker=None): + """ + Describes the available option groups. + + :type name: str + :param name: The name of the option group to describe. Cannot be + supplied together with engine_name or major_engine_version. + + :type engine_name: str + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: datetime + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific + database engine version. If specified, then + engine_name must also be specified. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of class:`boto.rds.optiongroup.OptionGroup` + """ + params = {} + if name: + params['OptionGroupName'] = name + elif engine_name and major_engine_version: + params['EngineName'] = engine_name + params['MajorEngineVersion'] = major_engine_version + if max_records: + params['MaxRecords'] = int(max_records) + if marker: + params['Marker'] = marker + return self.get_list('DescribeOptionGroups', params, [ + ('OptionGroup', OptionGroup) + ]) + + def describe_option_group_options(self, engine_name=None, + major_engine_version=None, max_records=100, + marker=None): + """ + Describes the available option group options. + + :type engine_name: str + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: datetime + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific + database engine version. If specified, then + engine_name must also be specified. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. 
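Likewise for the option group methods; the engine name and version are illustrative.

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

conn.create_option_group('my-og', 'mysql', '5.6',
                         description='Example option group')
for group in conn.describe_option_groups(engine_name='mysql',
                                         major_engine_version='5.6'):
    print(group.name)
conn.delete_option_group('my-og')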
+ + :rtype: list + :return: A list of class:`boto.rds.optiongroup.Option` + """ + params = {} + if engine_name and major_engine_version: + params['EngineName'] = engine_name + params['MajorEngineVersion'] = major_engine_version + if max_records: + params['MaxRecords'] = int(max_records) + if marker: + params['Marker'] = marker + return self.get_list('DescribeOptionGroupOptions', params, [ + ('OptionGroupOptions', OptionGroupOption) + ]) diff --git a/ext/boto/rds/dbinstance.py b/ext/boto/rds/dbinstance.py new file mode 100644 index 0000000000..6a6385103d --- /dev/null +++ b/ext/boto/rds/dbinstance.py @@ -0,0 +1,416 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.parametergroup import ParameterGroup +from boto.rds.statusinfo import StatusInfo +from boto.rds.dbsubnetgroup import DBSubnetGroup +from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership +from boto.resultset import ResultSet + + +class DBInstance(object): + """ + Represents a RDS DBInstance + + Properties reference available from the AWS documentation at + http://goo.gl/sC2Kn + + :ivar connection: connection + :ivar id: The name and identifier of the DBInstance + :ivar create_time: The date and time of creation + :ivar engine: The database engine being used + :ivar status: The status of the database in a string. e.g. "available" + :ivar allocated_storage: The size of the disk in gigabytes (int). + :ivar auto_minor_version_upgrade: Indicates that minor version patches + are applied automatically. + :ivar endpoint: A tuple that describes the hostname and port of + the instance. This is only available when the database is + in status "available". + :ivar instance_class: Contains the name of the compute and memory + capacity class of the DB Instance. + :ivar master_username: The username that is set as master username + at creation time. + :ivar parameter_groups: Provides the list of DB Parameter Groups + applied to this DB Instance. + :ivar security_groups: Provides List of DB Security Group elements + containing only DBSecurityGroup.Name and DBSecurityGroup.Status + subelements. + :ivar availability_zone: Specifies the name of the Availability Zone + the DB Instance is located in. + :ivar backup_retention_period: Specifies the number of days for + which automatic DB Snapshots are retained. 
+ :ivar preferred_backup_window: Specifies the daily time range during + which automated backups are created if automated backups are + enabled, as determined by the backup_retention_period. + :ivar preferred_maintenance_window: Specifies the weekly time + range (in UTC) during which system maintenance can occur. (string) + :ivar latest_restorable_time: Specifies the latest time to which + a database can be restored with point-in-time restore. (string) + :ivar multi_az: Boolean that specifies if the DB Instance is a + Multi-AZ deployment. + :ivar iops: The current number of provisioned IOPS for the DB Instance. + Can be None if this is a standard instance. + :ivar vpc_security_groups: List of VPC Security Group Membership elements + containing only VpcSecurityGroupMembership.VpcSecurityGroupId and + VpcSecurityGroupMembership.Status subelements. + :ivar pending_modified_values: Specifies that changes to the + DB Instance are pending. This element is only included when changes + are pending. Specific changes are identified by subelements. + :ivar read_replica_dbinstance_identifiers: List of read replicas + associated with this DB instance. + :ivar status_infos: The status of a Read Replica. If the instance is not a + for a read replica, this will be blank. + :ivar character_set_name: If present, specifies the name of the character + set that this instance is associated with. + :ivar subnet_group: Specifies information on the subnet group associated + with the DB instance, including the name, description, and subnets + in the subnet group. + :ivar engine_version: Indicates the database engine version. + :ivar license_model: License model information for this DB instance. + """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.create_time = None + self.engine = None + self.status = None + self.allocated_storage = None + self.auto_minor_version_upgrade = None + self.endpoint = None + self.instance_class = None + self.master_username = None + self.parameter_groups = [] + self.security_groups = [] + self.read_replica_dbinstance_identifiers = [] + self.availability_zone = None + self.backup_retention_period = None + self.preferred_backup_window = None + self.preferred_maintenance_window = None + self.latest_restorable_time = None + self.multi_az = False + self.iops = None + self.vpc_security_groups = None + self.pending_modified_values = None + self._in_endpoint = False + self._port = None + self._address = None + self.status_infos = None + self.character_set_name = None + self.subnet_group = None + self.engine_version = None + self.license_model = None + + def __repr__(self): + return 'DBInstance:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'Endpoint': + self._in_endpoint = True + elif name == 'DBParameterGroups': + self.parameter_groups = ResultSet([('DBParameterGroup', + ParameterGroup)]) + return self.parameter_groups + elif name == 'DBSecurityGroups': + self.security_groups = ResultSet([('DBSecurityGroup', + DBSecurityGroup)]) + return self.security_groups + elif name == 'VpcSecurityGroups': + self.vpc_security_groups = ResultSet([('VpcSecurityGroupMembership', + VPCSecurityGroupMembership)]) + return self.vpc_security_groups + elif name == 'PendingModifiedValues': + self.pending_modified_values = PendingModifiedValues() + return self.pending_modified_values + elif name == 'ReadReplicaDBInstanceIdentifiers': + self.read_replica_dbinstance_identifiers = \ + ReadReplicaDBInstanceIdentifiers() + return 
self.read_replica_dbinstance_identifiers + elif name == 'StatusInfos': + self.status_infos = ResultSet([ + ('DBInstanceStatusInfo', StatusInfo) + ]) + return self.status_infos + elif name == 'DBSubnetGroup': + self.subnet_group = DBSubnetGroup() + return self.subnet_group + return None + + def endElement(self, name, value, connection): + if name == 'DBInstanceIdentifier': + self.id = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'InstanceCreateTime': + self.create_time = value + elif name == 'Engine': + self.engine = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'AutoMinorVersionUpgrade': + self.auto_minor_version_upgrade = value.lower() == 'true' + elif name == 'DBInstanceClass': + self.instance_class = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'Port': + if self._in_endpoint: + self._port = int(value) + elif name == 'Address': + if self._in_endpoint: + self._address = value + elif name == 'Endpoint': + self.endpoint = (self._address, self._port) + self._in_endpoint = False + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'BackupRetentionPeriod': + self.backup_retention_period = int(value) + elif name == 'LatestRestorableTime': + self.latest_restorable_time = value + elif name == 'PreferredMaintenanceWindow': + self.preferred_maintenance_window = value + elif name == 'PreferredBackupWindow': + self.preferred_backup_window = value + elif name == 'MultiAZ': + if value.lower() == 'true': + self.multi_az = True + elif name == 'Iops': + self.iops = int(value) + elif name == 'CharacterSetName': + self.character_set_name = value + elif name == 'EngineVersion': + self.engine_version = value + elif name == 'LicenseModel': + self.license_model = value + else: + setattr(self, name, value) + + @property + def security_group(self): + """ + Provide backward compatibility for previous security_group + attribute. + """ + if len(self.security_groups) > 0: + return self.security_groups[-1] + else: + return None + + @property + def parameter_group(self): + """ + Provide backward compatibility for previous parameter_group + attribute. + """ + if len(self.parameter_groups) > 0: + return self.parameter_groups[-1] + else: + return None + + def snapshot(self, snapshot_id): + """ + Create a new DB snapshot of this DBInstance. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + return self.connection.create_dbsnapshot(snapshot_id, self.id) + + def reboot(self): + """ + Reboot this DBInstance + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + return self.connection.reboot_dbinstance(self.id) + + def update(self, validate=False): + """ + Update the DB instance's status information by making a call to fetch + the current instance attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If the + validate param is True, however, it will raise a + ValueError exception if no data is returned from EC2. 
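The update() method documented above supports simple status polling. A sketch assuming a boto.rds connection and a placeholder identifier:

import time

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

instance = conn.get_all_dbinstances('mydb')[0]
while instance.status != 'available':
    time.sleep(30)
    instance.update(validate=True)
print(instance.endpoint)  # (address, port) tuple once available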
+ """ + rs = self.connection.get_all_dbinstances(self.id) + if len(rs) > 0: + for i in rs: + if i.id == self.id: + self.__dict__.update(i.__dict__) + elif validate: + raise ValueError('%s is not a valid Instance ID' % self.id) + return self.status + + def stop(self, skip_final_snapshot=False, final_snapshot_id=''): + """ + Delete this DBInstance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether + a final db snapshot is created before the instance is + deleted. If True, no snapshot is created. If False, a + snapshot is created before deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + return self.connection.delete_dbinstance(self.id, + skip_final_snapshot, + final_snapshot_id) + + def modify(self, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + multi_az=False, + iops=None, + vpc_security_groups=None, + apply_immediately=False, + new_instance_id=None): + """ + Modify this DBInstance. + + :type param_group: str + :param param_group: Name of DBParameterGroup to associate with + this DBInstance. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to + authorize on this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in + UTC) during which maintenance can occur. Default is + Sun:05:00-Sun:09:00 + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-15 alphanumeric characters. + + :type allocated_storage: int + :param allocated_storage: The new allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the + DBInstance. Changes will be applied at next maintenance + window unless apply_immediately is True. + + Valid values are: + + * db.m1.small + * db.m1.large + * db.m1.xlarge + * db.m2.xlarge + * db.m2.2xlarge + * db.m2.4xlarge + + :type apply_immediately: bool + :param apply_immediately: If true, the modifications will be + applied as soon as possible rather than waiting for the + next preferred maintenance window. + + :type new_instance_id: str + :param new_instance_id: The new DB instance identifier. + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which + automated backups are retained. Setting this to zero + disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during + which automated backups are created (if enabled). Must be + in h24:mi-hh24:mi format (UTC). + + :type multi_az: bool + :param multi_az: If True, specifies the DB Instance will be + deployed in multiple availability zones. + + :type iops: int + :param iops: The amount of IOPS (input/output operations per + second) to Provisioned for the DB Instance. Can be + modified at a later date. + + Must scale linearly. For every 1000 IOPS provision, you + must allocated 100 GB of storage space. This scales up to + 1 TB / 10 000 IOPS for MySQL and Oracle. MSSQL is limited + to 700 GB / 7 000 IOPS. 
+ + If you specify a value, it must be at least 1000 IOPS and + you must allocate 100 GB of storage. + + :type vpc_security_groups: list + :param vpc_security_groups: List of VPCSecurityGroupMembership + that this DBInstance is a memberof. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The modified db instance. + """ + return self.connection.modify_dbinstance(self.id, + param_group, + security_groups, + preferred_maintenance_window, + master_password, + allocated_storage, + instance_class, + backup_retention_period, + preferred_backup_window, + multi_az, + apply_immediately, + iops, + vpc_security_groups, + new_instance_id) + + +class PendingModifiedValues(dict): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name != 'PendingModifiedValues': + self[name] = value + + +class ReadReplicaDBInstanceIdentifiers(list): + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ReadReplicaDBInstanceIdentifier': + self.append(value) diff --git a/ext/boto/rds/dbsecuritygroup.py b/ext/boto/rds/dbsecuritygroup.py new file mode 100644 index 0000000000..378360667d --- /dev/null +++ b/ext/boto/rds/dbsecuritygroup.py @@ -0,0 +1,186 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an DBSecurityGroup +""" +from boto.ec2.securitygroup import SecurityGroup + +class DBSecurityGroup(object): + """ + Represents an RDS database security group + + Properties reference available from the AWS documentation at + http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html + + :ivar Status: The current status of the security group. Possible values are + [ active, ? ]. Reference documentation lacks specifics of possibilities + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object + :ivar description: The description of the security group + :ivar ec2_groups: List of :py:class:`EC2 Security Group + ` objects that this security + group PERMITS + :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange` + objects (containing CIDR addresses) that this security group PERMITS + :ivar name: Name of the security group + :ivar owner_id: ID of the owner of the security group. 
Can be 'None' + """ + def __init__(self, connection=None, owner_id=None, + name=None, description=None): + self.connection = connection + self.owner_id = owner_id + self.name = name + self.description = description + self.ec2_groups = [] + self.ip_ranges = [] + + def __repr__(self): + return 'DBSecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'IPRange': + cidr = IPRange(self) + self.ip_ranges.append(cidr) + return cidr + elif name == 'EC2SecurityGroup': + ec2_grp = EC2SecurityGroup(self) + self.ec2_groups.append(ec2_grp) + return ec2_grp + else: + return None + + def endElement(self, name, value, connection): + if name == 'OwnerId': + self.owner_id = value + elif name == 'DBSecurityGroupName': + self.name = value + elif name == 'DBSecurityGroupDescription': + self.description = value + elif name == 'IPRanges': + pass + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_dbsecurity_group(self.name) + + def authorize(self, cidr_ip=None, ec2_group=None): + """ + Add a new rule to this DBSecurity group. + You need to pass in either a CIDR block to authorize or + and EC2 SecurityGroup. + + :type cidr_ip: string + :param cidr_ip: A valid CIDR IP range to authorize + + :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup` + :param ec2_group: An EC2 security group to authorize + + :rtype: bool + :return: True if successful. + """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + else: + group_name = None + group_owner_id = None + return self.connection.authorize_dbsecurity_group(self.name, + cidr_ip, + group_name, + group_owner_id) + + def revoke(self, cidr_ip=None, ec2_group=None): + """ + Revoke access to a CIDR range or EC2 SecurityGroup. + You need to pass in either a CIDR block or + an EC2 SecurityGroup from which to revoke access. + + :type cidr_ip: string + :param cidr_ip: A valid CIDR IP range to revoke + + :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup` + :param ec2_group: An EC2 security group to revoke + + :rtype: bool + :return: True if successful. 
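These object-level authorize/revoke methods mirror the connection-level calls shown earlier and special-case boto.ec2 SecurityGroup objects. A sketch; the EC2 group name and region are assumptions for illustration.

import boto.ec2
import boto.rds

rds = boto.rds.connect_to_region('us-east-1')
ec2 = boto.ec2.connect_to_region('us-east-1')

grp = rds.create_dbsecurity_group('appdb', 'App tier access')
web_sg = ec2.get_all_security_groups(groupnames=['web'])[0]
grp.authorize(ec2_group=web_sg)
grp.revoke(ec2_group=web_sg)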
+ """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + return self.connection.revoke_dbsecurity_group( + self.name, + ec2_security_group_name=group_name, + ec2_security_group_owner_id=group_owner_id) + + # Revoking by CIDR IP range + return self.connection.revoke_dbsecurity_group( + self.name, cidr_ip=cidr_ip) + +class IPRange(object): + """ + Describes a CIDR address range for use in a DBSecurityGroup + + :ivar cidr_ip: IP Address range + """ + + def __init__(self, parent=None): + self.parent = parent + self.cidr_ip = None + self.status = None + + def __repr__(self): + return 'IPRange:%s' % self.cidr_ip + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CIDRIP': + self.cidr_ip = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + +class EC2SecurityGroup(object): + """ + Describes an EC2 security group for use in a DBSecurityGroup + """ + + def __init__(self, parent=None): + self.parent = parent + self.name = None + self.owner_id = None + + def __repr__(self): + return 'EC2SecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'EC2SecurityGroupName': + self.name = value + elif name == 'EC2SecurityGroupOwnerId': + self.owner_id = value + else: + setattr(self, name, value) diff --git a/ext/boto/rds/dbsnapshot.py b/ext/boto/rds/dbsnapshot.py new file mode 100644 index 0000000000..16d8125be7 --- /dev/null +++ b/ext/boto/rds/dbsnapshot.py @@ -0,0 +1,138 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+class DBSnapshot(object):
+    """
+    Represents an RDS DB Snapshot
+
+    Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DBSnapshot.html
+
+    :ivar engine_version: Specifies the version of the database engine
+    :ivar license_model: License model information for the restored DB instance
+    :ivar allocated_storage: Specifies the allocated storage size in gigabytes (GB)
+    :ivar availability_zone: Specifies the name of the Availability Zone the DB Instance was located in at the time of the DB Snapshot
+    :ivar connection: boto.rds.RDSConnection associated with the current object
+    :ivar engine: Specifies the name of the database engine
+    :ivar id: Specifies the identifier for the DB Snapshot (DBSnapshotIdentifier)
+    :ivar instance_create_time: Specifies the time (UTC) when the snapshot was taken
+    :ivar instance_id: Specifies the DBInstanceIdentifier of the DB Instance this DB Snapshot was created from (DBInstanceIdentifier)
+    :ivar master_username: Provides the master username for the DB Instance
+    :ivar port: Specifies the port that the database engine was listening on at the time of the snapshot
+    :ivar snapshot_create_time: Provides the time (UTC) when the snapshot was taken
+    :ivar status: Specifies the status of this DB Snapshot. Possible values are [ available, backing-up, creating, deleted, deleting, failed, modifying, rebooting, resetting-master-credentials ]
+    :ivar iops: Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
+    :ivar option_group_name: Provides the option group name for the DB snapshot.
+    :ivar percent_progress: The percentage of the estimated data that has been transferred.
+    :ivar snapshot_type: Provides the type of the DB snapshot.
+    :ivar source_region: The region that the DB snapshot was created in or copied from.
+    :ivar vpc_id: Provides the Vpc Id associated with the DB snapshot.
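A DBSnapshot returned by create_dbsnapshot can be polled with the update() method defined below, then restored; identifiers are placeholders.

import time

import boto.rds

conn = boto.rds.connect_to_region('us-east-1')

snap = conn.create_dbsnapshot('mydb-snap-1', 'mydb')
while snap.status != 'available':
    time.sleep(30)
    snap.update()
conn.restore_dbinstance_from_dbsnapshot('mydb-snap-1', 'mydb-restored',
                                        'db.m1.small')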
+ """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.engine = None + self.engine_version = None + self.snapshot_create_time = None + self.instance_create_time = None + self.port = None + self.status = None + self.availability_zone = None + self.master_username = None + self.allocated_storage = None + self.instance_id = None + self.availability_zone = None + self.license_model = None + self.iops = None + self.option_group_name = None + self.percent_progress = None + self.snapshot_type = None + self.source_region = None + self.vpc_id = None + + def __repr__(self): + return 'DBSnapshot:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Engine': + self.engine = value + elif name == 'EngineVersion': + self.engine_version = value + elif name == 'InstanceCreateTime': + self.instance_create_time = value + elif name == 'SnapshotCreateTime': + self.snapshot_create_time = value + elif name == 'DBInstanceIdentifier': + self.instance_id = value + elif name == 'DBSnapshotIdentifier': + self.id = value + elif name == 'Port': + self.port = int(value) + elif name == 'Status': + self.status = value + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'SnapshotTime': + self.time = value + elif name == 'LicenseModel': + self.license_model = value + elif name == 'Iops': + self.iops = int(value) + elif name == 'OptionGroupName': + self.option_group_name = value + elif name == 'PercentProgress': + self.percent_progress = int(value) + elif name == 'SnapshotType': + self.snapshot_type = value + elif name == 'SourceRegion': + self.source_region = value + elif name == 'VpcId': + self.vpc_id = value + else: + setattr(self, name, value) + + def update(self, validate=False): + """ + Update the DB snapshot's status information by making a call to fetch + the current snapshot attributes from the service. + + :type validate: bool + :param validate: By default, if EC2 returns no data about the + instance the update method returns quietly. If + the validate param is True, however, it will + raise a ValueError exception if no data is + returned from EC2. + """ + rs = self.connection.get_all_dbsnapshots(self.id) + if len(rs) > 0: + for i in rs: + if i.id == self.id: + self.__dict__.update(i.__dict__) + elif validate: + raise ValueError('%s is not a valid Snapshot ID' % self.id) + return self.status diff --git a/ext/boto/rds/dbsubnetgroup.py b/ext/boto/rds/dbsubnetgroup.py new file mode 100644 index 0000000000..4f6bde8924 --- /dev/null +++ b/ext/boto/rds/dbsubnetgroup.py @@ -0,0 +1,69 @@ +# Copyright (c) 2013 Franc Carter - franc.carter@gmail.com +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a DBSubnetGroup
+"""
+
+class DBSubnetGroup(object):
+    """
+    Represents an RDS database subnet group
+
+    Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSubnetGroup.html
+
+    :ivar status: The current status of the subnet group. Possible values are [ active, ? ]; the reference documentation does not enumerate the full set of values
+    :ivar connection: boto.rds.RDSConnection associated with the current object
+    :ivar description: The description of the subnet group
+    :ivar subnet_ids: List of subnet identifiers in the group
+    :ivar name: Name of the subnet group
+    :ivar vpc_id: The ID of the VPC the subnets are inside
+    """
+    def __init__(self, connection=None, name=None, description=None, subnet_ids=None):
+        self.connection = connection
+        self.name = name
+        self.description = description
+        if subnet_ids is not None:
+            self.subnet_ids = subnet_ids
+        else:
+            self.subnet_ids = []
+        self.vpc_id = None
+        self.status = None
+
+    def __repr__(self):
+        return 'DBSubnetGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'SubnetIdentifier':
+            self.subnet_ids.append(value)
+        elif name == 'DBSubnetGroupName':
+            self.name = value
+        elif name == 'DBSubnetGroupDescription':
+            self.description = value
+        elif name == 'VpcId':
+            self.vpc_id = value
+        elif name == 'SubnetGroupStatus':
+            self.status = value
+        else:
+            setattr(self, name, value)
+
diff --git a/ext/boto/rds/event.py b/ext/boto/rds/event.py
new file mode 100644
index 0000000000..a91f8f08a5
--- /dev/null
+++ b/ext/boto/rds/event.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
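+
+# Usage sketch (illustrative only, not part of the vendored module): Event
+# objects are normally produced by RDSConnection.get_all_events() rather than
+# constructed directly. Assuming default credentials and the us-east-1 region:
+#
+#     import boto.rds
+#
+#     conn = boto.rds.connect_to_region('us-east-1')
+#     for event in conn.get_all_events():
+#         print('%s %s: %s' % (event.date, event.source_identifier,
+#                              event.message))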
+
+class Event(object):
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.message = None
+        self.source_identifier = None
+        self.source_type = None
+        self.engine = None
+        self.date = None
+
+    def __repr__(self):
+        return '"%s"' % self.message
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'SourceIdentifier':
+            self.source_identifier = value
+        elif name == 'SourceType':
+            self.source_type = value
+        elif name == 'Message':
+            self.message = value
+        elif name == 'Date':
+            self.date = value
+        else:
+            setattr(self, name, value)
+
diff --git a/ext/boto/rds/logfile.py b/ext/boto/rds/logfile.py
new file mode 100644
index 0000000000..dd80a6ff82
--- /dev/null
+++ b/ext/boto/rds/logfile.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2014 Jumping Qu http://newrice.blogspot.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class LogFile(object):
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.size = None
+        self.log_filename = None
+        self.last_written = None
+
+    def __repr__(self):
+        return '%s' % (self.log_filename)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'LastWritten':
+            self.last_written = value
+        elif name == 'LogFileName':
+            self.log_filename = value
+        elif name == 'Size':
+            self.size = value
+        else:
+            setattr(self, name, value)
+
+
+class LogFileObject(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        # Initialize all attributes read elsewhere so partially populated
+        # objects do not raise AttributeError (dbinstance_id is filled in
+        # by the caller, the rest by endElement).
+        self.dbinstance_id = None
+        self.log_filename = None
+        self.data = None
+        self.additional_data_pending = None
+        self.marker = None
+
+    def __repr__(self):
+        return "LogFileObject: %s/%s" % (self.dbinstance_id, self.log_filename)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'LogFileData':
+            self.data = value
+        elif name == 'AdditionalDataPending':
+            self.additional_data_pending = value
+        elif name == 'Marker':
+            self.marker = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/rds/optiongroup.py b/ext/boto/rds/optiongroup.py
new file mode 100644
index 0000000000..8968b6cad6
--- /dev/null
+++ b/ext/boto/rds/optiongroup.py
@@ -0,0 +1,404 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an OptionGroup +""" + +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.resultset import ResultSet + + +class OptionGroup(object): + """ + Represents an RDS option group + + Properties reference available from the AWS documentation at + http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_OptionGroup.html + + :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the + current object + :ivar name: Name of the option group + :ivar description: The description of the option group + :ivar engine_name: The name of the database engine to use + :ivar major_engine_version: The major version number of the engine to use + :ivar allow_both_vpc_and_nonvpc: Indicates whether this option group can be + applied to both VPC and non-VPC instances. + The value ``True`` indicates the option + group can be applied to both VPC and + non-VPC instances. + :ivar vpc_id: If AllowsVpcAndNonVpcInstanceMemberships is 'false', this + field is blank. If AllowsVpcAndNonVpcInstanceMemberships is + ``True`` and this field is blank, then this option group can + be applied to both VPC and non-VPC instances. If this field + contains a value, then this option group can only be applied + to instances that are in the VPC indicated by this field. 
+    :ivar options: The list of :py:class:`boto.rds.optiongroup.Option` objects
+                   associated with the group
+    """
+    def __init__(self, connection=None, name=None, engine_name=None,
+                 major_engine_version=None, description=None,
+                 allow_both_vpc_and_nonvpc=False, vpc_id=None):
+        # Store the connection so that delete() can use it.
+        self.connection = connection
+        self.name = name
+        self.engine_name = engine_name
+        self.major_engine_version = major_engine_version
+        self.description = description
+        self.allow_both_vpc_and_nonvpc = allow_both_vpc_and_nonvpc
+        self.vpc_id = vpc_id
+        self.options = []
+
+    def __repr__(self):
+        return 'OptionGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Options':
+            self.options = ResultSet([
+                ('Options', Option)
+            ])
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'OptionGroupName':
+            self.name = value
+        elif name == 'EngineName':
+            self.engine_name = value
+        elif name == 'MajorEngineVersion':
+            self.major_engine_version = value
+        elif name == 'OptionGroupDescription':
+            self.description = value
+        elif name == 'AllowsVpcAndNonVpcInstanceMemberships':
+            if value.lower() == 'true':
+                self.allow_both_vpc_and_nonvpc = True
+            else:
+                self.allow_both_vpc_and_nonvpc = False
+        elif name == 'VpcId':
+            self.vpc_id = value
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_option_group(self.name)
+
+
+class Option(object):
+    """
+    Describes an Option for use in an OptionGroup
+
+    :ivar name: The name of the option
+    :ivar description: The description of the option.
+    :ivar permanent: Indicates if this option is permanent.
+    :ivar persistent: Indicates if this option is persistent.
+    :ivar port: If required, the port configured for this option to use.
+    :ivar settings: The option settings for this option.
+    :ivar db_security_groups: If the option requires access to a port, then
+                              this DB Security Group allows access to the port.
+    :ivar vpc_security_groups: If the option requires access to a port, then
+                               this VPC Security Group allows access to the
+                               port.
+    """
+    def __init__(self, name=None, description=None, permanent=False,
+                 persistent=False, port=None, settings=None,
+                 db_security_groups=None, vpc_security_groups=None):
+        self.name = name
+        self.description = description
+        self.permanent = permanent
+        self.persistent = persistent
+        self.port = port
+        self.settings = settings
+        self.db_security_groups = db_security_groups
+        self.vpc_security_groups = vpc_security_groups
+
+        if self.settings is None:
+            self.settings = []
+
+        if self.db_security_groups is None:
+            self.db_security_groups = []
+
+        if self.vpc_security_groups is None:
+            self.vpc_security_groups = []
+
+    def __repr__(self):
+        return 'Option:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'OptionSettings':
+            self.settings = ResultSet([
+                ('OptionSettings', OptionSetting)
+            ])
+        elif name == 'DBSecurityGroupMemberships':
+            self.db_security_groups = ResultSet([
+                ('DBSecurityGroupMemberships', DBSecurityGroup)
+            ])
+        elif name == 'VpcSecurityGroupMemberships':
+            self.vpc_security_groups = ResultSet([
+                ('VpcSecurityGroupMemberships', VpcSecurityGroup)
+            ])
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'OptionName':
+            self.name = value
+        elif name == 'OptionDescription':
+            self.description = value
+        elif name == 'Permanent':
+            if value.lower() == 'true':
+                self.permanent = True
+            else:
+                self.permanent = False
+        elif name == 'Persistent':
+            if value.lower() == 'true':
+                self.persistent = True
+            else:
+                self.persistent = False
+        elif name == 'Port':
+            self.port = int(value)
+        else:
+            setattr(self, name, value)
+
+
+class OptionSetting(object):
+    """
+    Describes an OptionSetting for use in an Option
+
+    :ivar name: The name of the option that has settings that you can set.
+    :ivar description: The description of the option setting.
+    :ivar value: The current value of the option setting.
+    :ivar default_value: The default value of the option setting.
+    :ivar allowed_values: The allowed values of the option setting.
+    :ivar data_type: The data type of the option setting.
+    :ivar apply_type: The DB engine specific parameter type.
+    :ivar is_modifiable: A Boolean value that, when true, indicates the option
+                         setting can be modified from the default.
+    :ivar is_collection: Indicates if the option setting is part of a
+                         collection.
+    """
+
+    def __init__(self, name=None, description=None, value=None,
+                 default_value=False, allowed_values=None, data_type=None,
+                 apply_type=None, is_modifiable=False, is_collection=False):
+        self.name = name
+        self.description = description
+        self.value = value
+        self.default_value = default_value
+        self.allowed_values = allowed_values
+        self.data_type = data_type
+        self.apply_type = apply_type
+        self.is_modifiable = is_modifiable
+        self.is_collection = is_collection
+
+    def __repr__(self):
+        return 'OptionSetting:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'Description':
+            self.description = value
+        elif name == 'Value':
+            self.value = value
+        elif name == 'DefaultValue':
+            self.default_value = value
+        elif name == 'AllowedValues':
+            self.allowed_values = value
+        elif name == 'DataType':
+            self.data_type = value
+        elif name == 'ApplyType':
+            self.apply_type = value
+        elif name == 'IsModifiable':
+            if value.lower() == 'true':
+                self.is_modifiable = True
+            else:
+                self.is_modifiable = False
+        elif name == 'IsCollection':
+            if value.lower() == 'true':
+                self.is_collection = True
+            else:
+                self.is_collection = False
+        else:
+            setattr(self, name, value)
+
+
+class VpcSecurityGroup(object):
+    """
+    Describes a VPC security group for use in an OptionGroup
+    """
+    def __init__(self, vpc_id=None, status=None):
+        self.vpc_id = vpc_id
+        self.status = status
+
+    def __repr__(self):
+        return 'VpcSecurityGroup:%s' % self.vpc_id
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'VpcSecurityGroupId':
+            self.vpc_id = value
+        elif name == 'Status':
+            self.status = value
+        else:
+            setattr(self, name, value)
+
+
+class OptionGroupOption(object):
+    """
+    Describes an OptionGroupOption for use in an OptionGroup
+
+    :ivar name: The name of the option
+    :ivar description: The description of the option.
+    :ivar engine_name: Engine name that this option can be applied to.
+    :ivar major_engine_version: Indicates the major engine version that the
+                                option is available for.
+    :ivar min_minor_engine_version: The minimum required engine version for the
+                                    option to be applied.
+    :ivar permanent: Indicates if this option is permanent.
+    :ivar persistent: Indicates if this option is persistent.
+    :ivar port_required: Specifies whether the option requires a port.
+    :ivar default_port: If the option requires a port, specifies the default
+                        port for the option.
+    :ivar settings: The option settings for this option.
+    :ivar depends_on: List of all options that are prerequisites for this
+                      option.
+    """
+    def __init__(self, name=None, description=None, engine_name=None,
+                 major_engine_version=None, min_minor_engine_version=None,
+                 permanent=False, persistent=False, port_required=False,
+                 default_port=None, settings=None, depends_on=None):
+        self.name = name
+        self.description = description
+        self.engine_name = engine_name
+        self.major_engine_version = major_engine_version
+        self.min_minor_engine_version = min_minor_engine_version
+        self.permanent = permanent
+        self.persistent = persistent
+        self.port_required = port_required
+        self.default_port = default_port
+        self.settings = settings
+        self.depends_on = depends_on
+
+        if self.settings is None:
+            self.settings = []
+
+        if self.depends_on is None:
+            self.depends_on = []
+
+    def __repr__(self):
+        return 'OptionGroupOption:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'OptionGroupOptionSettings':
+            self.settings = ResultSet([
+                ('OptionGroupOptionSettings', OptionGroupOptionSetting)
+            ])
+        elif name == 'OptionsDependedOn':
+            self.depends_on = []
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'Description':
+            self.description = value
+        elif name == 'EngineName':
+            self.engine_name = value
+        elif name == 'MajorEngineVersion':
+            self.major_engine_version = value
+        elif name == 'MinimumRequiredMinorEngineVersion':
+            self.min_minor_engine_version = value
+        elif name == 'Permanent':
+            if value.lower() == 'true':
+                self.permanent = True
+            else:
+                self.permanent = False
+        elif name == 'Persistent':
+            if value.lower() == 'true':
+                self.persistent = True
+            else:
+                self.persistent = False
+        elif name == 'PortRequired':
+            if value.lower() == 'true':
+                self.port_required = True
+            else:
+                self.port_required = False
+        elif name == 'DefaultPort':
+            self.default_port = int(value)
+        else:
+            setattr(self, name, value)
+
+
+class OptionGroupOptionSetting(object):
+    """
+    Describes an OptionGroupOptionSetting for use in an OptionGroupOption.
+
+    :ivar name: The name of the option that has settings that you can set.
+    :ivar description: The description of the option setting.
+    :ivar value: The current value of the option setting.
+    :ivar default_value: The default value of the option setting.
+    :ivar allowed_values: The allowed values of the option setting.
+    :ivar data_type: The data type of the option setting.
+    :ivar apply_type: The DB engine specific parameter type.
+    :ivar is_modifiable: A Boolean value that, when true, indicates the option
+                         setting can be modified from the default.
+    :ivar is_collection: Indicates if the option setting is part of a
+                         collection.
+ """ + + def __init__(self, name=None, description=None, default_value=False, + allowed_values=None, apply_type=None, is_modifiable=False): + self.name = name + self.description = description + self.default_value = default_value + self.allowed_values = allowed_values + self.apply_type = apply_type + self.is_modifiable = is_modifiable + + def __repr__(self): + return 'OptionGroupOptionSetting:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'SettingName': + self.name = value + elif name == 'SettingDescription': + self.description = value + elif name == 'DefaultValue': + self.default_value = value + elif name == 'AllowedValues': + self.allowed_values = value + elif name == 'ApplyType': + self.apply_type = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + else: + setattr(self, name, value) diff --git a/ext/boto/rds/parametergroup.py b/ext/boto/rds/parametergroup.py new file mode 100644 index 0000000000..ade3b807e7 --- /dev/null +++ b/ext/boto/rds/parametergroup.py @@ -0,0 +1,201 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
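+
+# Usage sketch (illustrative only, not a definitive recipe): a ParameterGroup
+# is a dict of Parameter objects keyed by parameter name, typically obtained
+# from RDSConnection.get_all_dbparameters(). Assuming default credentials and
+# an existing group named 'mygroup' whose first page of results includes the
+# integer parameter 'max_connections':
+#
+#     import boto.rds
+#
+#     conn = boto.rds.connect_to_region('us-east-1')
+#     pg = conn.get_all_dbparameters('mygroup')
+#     param = pg['max_connections']
+#     param.value = 250
+#     param.apply(immediate=True)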
+
+class ParameterGroup(dict):
+
+    def __init__(self, connection=None):
+        dict.__init__(self)
+        self.connection = connection
+        self.name = None
+        self.description = None
+        self.engine = None
+        self._current_param = None
+
+    def __repr__(self):
+        return 'ParameterGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Parameter':
+            if self._current_param:
+                self[self._current_param.name] = self._current_param
+            self._current_param = Parameter(self)
+            return self._current_param
+
+    def endElement(self, name, value, connection):
+        if name == 'DBParameterGroupName':
+            self.name = value
+        elif name == 'Description':
+            self.description = value
+        elif name == 'Engine':
+            self.engine = value
+        else:
+            setattr(self, name, value)
+
+    def modifiable(self):
+        mod = []
+        for key in self:
+            p = self[key]
+            if p.is_modifiable:
+                mod.append(p)
+        return mod
+
+    def get_params(self):
+        pg = self.connection.get_all_dbparameters(self.name)
+        self.update(pg)
+
+    def add_param(self, name, value, apply_method):
+        param = Parameter()
+        param.name = name
+        param.value = value
+        param.apply_method = apply_method
+        # ParameterGroup is a dict keyed by parameter name (see startElement)
+        self[name] = param
+
+class Parameter(object):
+    """
+    Represents an RDS Parameter
+    """
+
+    ValidTypes = {'integer' : int,
+                  'string' : str,
+                  'boolean' : bool}
+    ValidSources = ['user', 'system', 'engine-default']
+    ValidApplyTypes = ['static', 'dynamic']
+    ValidApplyMethods = ['immediate', 'pending-reboot']
+
+    def __init__(self, group=None, name=None):
+        self.group = group
+        self.name = name
+        self._value = None
+        self.type = 'string'
+        self.source = None
+        self.is_modifiable = True
+        self.description = None
+        self.apply_method = None
+        self.apply_type = None
+        self.allowed_values = None
+
+    def __repr__(self):
+        return 'Parameter:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'ParameterName':
+            self.name = value
+        elif name == 'ParameterValue':
+            self._value = value
+        elif name == 'DataType':
+            if value in self.ValidTypes:
+                self.type = value
+        elif name == 'Source':
+            if value in self.ValidSources:
+                self.source = value
+        elif name == 'IsModifiable':
+            if value.lower() == 'true':
+                self.is_modifiable = True
+            else:
+                self.is_modifiable = False
+        elif name == 'Description':
+            self.description = value
+        elif name == 'ApplyType':
+            if value in self.ValidApplyTypes:
+                self.apply_type = value
+        elif name == 'AllowedValues':
+            self.allowed_values = value
+        else:
+            setattr(self, name, value)
+
+    def merge(self, d, i):
+        prefix = 'Parameters.member.%d.'
% i
+        if self.name:
+            d[prefix+'ParameterName'] = self.name
+        if self._value is not None:
+            d[prefix+'ParameterValue'] = self._value
+        if self.apply_method:
+            d[prefix+'ApplyMethod'] = self.apply_method
+
+    def _set_string_value(self, value):
+        if not isinstance(value, basestring):
+            raise ValueError('value must be of type str')
+        if self.allowed_values:
+            choices = self.allowed_values.split(',')
+            if value not in choices:
+                raise ValueError('value must be in %s' % self.allowed_values)
+        self._value = value
+
+    def _set_integer_value(self, value):
+        if isinstance(value, basestring):
+            value = int(value)
+        if isinstance(value, int) or isinstance(value, long):
+            if self.allowed_values:
+                min, max = self.allowed_values.split('-')
+                if value < int(min) or value > int(max):
+                    raise ValueError('range is %s' % self.allowed_values)
+            self._value = value
+        else:
+            raise ValueError('value must be integer')
+
+    def _set_boolean_value(self, value):
+        if isinstance(value, bool):
+            self._value = value
+        elif isinstance(value, basestring):
+            if value.lower() == 'true':
+                self._value = True
+            else:
+                self._value = False
+        else:
+            raise ValueError('value must be boolean')
+
+    def set_value(self, value):
+        if self.type == 'string':
+            self._set_string_value(value)
+        elif self.type == 'integer':
+            self._set_integer_value(value)
+        elif self.type == 'boolean':
+            self._set_boolean_value(value)
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    def get_value(self):
+        if self._value is None:
+            return self._value
+        if self.type == 'string':
+            return self._value
+        elif self.type == 'integer':
+            if not isinstance(self._value, int) and not isinstance(self._value, long):
+                self._set_integer_value(self._value)
+            return self._value
+        elif self.type == 'boolean':
+            if not isinstance(self._value, bool):
+                self._set_boolean_value(self._value)
+            return self._value
+        else:
+            raise TypeError('unknown type (%s)' % self.type)
+
+    value = property(get_value, set_value, 'The value of the parameter')
+
+    def apply(self, immediate=False):
+        if immediate:
+            self.apply_method = 'immediate'
+        else:
+            self.apply_method = 'pending-reboot'
+        self.group.connection.modify_parameter_group(self.group.name, [self])
+
diff --git a/ext/boto/rds/regioninfo.py b/ext/boto/rds/regioninfo.py
new file mode 100644
index 0000000000..5019aca90f
--- /dev/null
+++ b/ext/boto/rds/regioninfo.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class RDSRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.rds import RDSConnection + super(RDSRegionInfo, self).__init__(connection, name, endpoint, + RDSConnection) diff --git a/ext/boto/rds/statusinfo.py b/ext/boto/rds/statusinfo.py new file mode 100644 index 0000000000..d4ff9b08de --- /dev/null +++ b/ext/boto/rds/statusinfo.py @@ -0,0 +1,54 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +class StatusInfo(object): + """ + Describes a status message. + """ + + def __init__(self, status_type=None, normal=None, status=None, message=None): + self.status_type = status_type + self.normal = normal + self.status = status + self.message = message + + def __repr__(self): + return 'StatusInfo:%s' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'StatusType': + self.status_type = value + elif name == 'Normal': + if value.lower() == 'true': + self.normal = True + else: + self.normal = False + elif name == 'Status': + self.status = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) diff --git a/ext/boto/rds/vpcsecuritygroupmembership.py b/ext/boto/rds/vpcsecuritygroupmembership.py new file mode 100644 index 0000000000..e0092e9c2f --- /dev/null +++ b/ext/boto/rds/vpcsecuritygroupmembership.py @@ -0,0 +1,85 @@ +# Copyright (c) 2013 Anthony Tonns http://www.corsis.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a VPCSecurityGroupMembership
+"""
+
+
+class VPCSecurityGroupMembership(object):
+    """
+    Represents a VPC Security Group that this RDS database is a member of
+
+    Properties reference available from the AWS documentation at
+    http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\
+    API_VpcSecurityGroupMembership.html
+
+    Example::
+
+        pri = "sg-abcdefgh"
+        sec = "sg-hgfedcba"
+
+        # Create with list of str
+        db = c.create_dbinstance(... vpc_security_groups=[pri], ... )
+
+        # Modify with list of str
+        db.modify(... vpc_security_groups=[pri,sec], ... )
+
+        # Create with objects
+        memberships = []
+        membership = VPCSecurityGroupMembership()
+        membership.vpc_group = pri
+        memberships.append(membership)
+
+        db = c.create_dbinstance(... vpc_security_groups=memberships, ... )
+
+        # Modify with objects
+        memberships = d.vpc_security_groups
+        membership = VPCSecurityGroupMembership()
+        membership.vpc_group = sec
+        memberships.append(membership)
+
+        db.modify(... vpc_security_groups=memberships, ... )
+
+    :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
+                      current object
+    :ivar vpc_group: The id of the VPC security group
+    :ivar status: Status of the VPC security group membership
+    """
+    def __init__(self, connection=None, status=None, vpc_group=None):
+        self.connection = connection
+        self.status = status
+        self.vpc_group = vpc_group
+
+    def __repr__(self):
+        return 'VPCSecurityGroupMembership:%s' % self.vpc_group
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'VpcSecurityGroupId':
+            self.vpc_group = value
+        elif name == 'Status':
+            self.status = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/rds2/__init__.py b/ext/boto/rds2/__init__.py
new file mode 100644
index 0000000000..3ea68ecfb4
--- /dev/null
+++ b/ext/boto/rds2/__init__.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import get_regions
+from boto.regioninfo import connect
+
+
+def regions():
+    """
+    Get all available regions for the RDS service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    from boto.rds2.layer1 import RDSConnection
+    return get_regions('rds', connection_cls=RDSConnection)
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.rds2.layer1.RDSConnection`.
+    Any additional parameters after the region_name are passed on to
+    the connect method of the region object.
+
+    :type region_name: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.rds2.layer1.RDSConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+             name is given
+    """
+    from boto.rds2.layer1 import RDSConnection
+    return connect('rds', region_name, connection_cls=RDSConnection,
+                   **kw_params)
diff --git a/ext/boto/rds2/exceptions.py b/ext/boto/rds2/exceptions.py
new file mode 100644
index 0000000000..be610b0171
--- /dev/null
+++ b/ext/boto/rds2/exceptions.py
@@ -0,0 +1,234 @@
+# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
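+#
+# Usage sketch (illustrative only, assuming default credentials): these
+# classes are raised by boto.rds2.layer1.RDSConnection when the service
+# returns the corresponding fault, so callers can catch specific error
+# conditions:
+#
+#     import boto.rds2
+#     from boto.rds2.exceptions import DBInstanceNotFound
+#
+#     conn = boto.rds2.connect_to_region('us-east-1')
+#     try:
+#         conn.describe_db_instances('no-such-instance')
+#     except DBInstanceNotFound:
+#         pass  # instance does not exist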
+# +from boto.exception import JSONResponseError + + +class InvalidSubnet(JSONResponseError): + pass + + +class DBParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class DBSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class DBSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class InstanceQuotaExceeded(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class InvalidDBParameterGroupState(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class InsufficientDBInstanceCapacity(JSONResponseError): + pass + + +class ReservedDBInstanceQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotFound(JSONResponseError): + pass + + +class DBInstanceAlreadyExists(JSONResponseError): + pass + + +class ReservedDBInstanceNotFound(JSONResponseError): + pass + + +class DBSubnetGroupDoesNotCoverEnoughAZs(JSONResponseError): + pass + + +class InvalidDBSecurityGroupState(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ReservedDBInstancesOfferingNotFound(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class SnapshotQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupQuotaExceeded(JSONResponseError): + pass + + +class DBParameterGroupNotFound(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class InvalidDBSubnetGroupState(JSONResponseError): + pass + + +class DBSubnetGroupNotFound(JSONResponseError): + pass + + +class InvalidOptionGroupState(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class DBSecurityGroupNotSupported(JSONResponseError): + pass + + +class InvalidEventSubscriptionState(JSONResponseError): + pass + + +class InvalidDBSubnetState(JSONResponseError): + pass + + +class InvalidDBSnapshotState(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class DBSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class ProvisionedIopsNotAvailableInAZ(JSONResponseError): + pass + + +class AuthorizationNotFound(JSONResponseError): + pass + + +class OptionGroupAlreadyExists(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class DBUpgradeDependencyFailure(JSONResponseError): + pass + + +class PointInTimeRestoreNotEnabled(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class DBSubnetQuotaExceeded(JSONResponseError): + pass + + +class OptionGroupNotFound(JSONResponseError): + pass + + +class DBParameterGroupAlreadyExists(JSONResponseError): + pass + + +class DBInstanceNotFound(JSONResponseError): + pass + + +class ReservedDBInstanceAlreadyExists(JSONResponseError): + pass + + +class InvalidDBInstanceState(JSONResponseError): + pass + + +class DBSnapshotNotFound(JSONResponseError): + pass + + +class DBSnapshotAlreadyExists(JSONResponseError): + pass + + +class StorageQuotaExceeded(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass diff --git a/ext/boto/rds2/layer1.py b/ext/boto/rds2/layer1.py new file mode 100644 index 0000000000..bbe5a778a9 --- /dev/null +++ b/ext/boto/rds2/layer1.py @@ -0,0 
+1,3783 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.rds2 import exceptions +from boto.compat import json + + +class RDSConnection(AWSQueryConnection): + """ + Amazon Relational Database Service + Amazon Relational Database Service (Amazon RDS) is a web service + that makes it easier to set up, operate, and scale a relational + database in the cloud. It provides cost-efficient, resizable + capacity for an industry-standard relational database and manages + common database administration tasks, freeing up developers to + focus on what makes their applications and businesses unique. + + Amazon RDS gives you access to the capabilities of a familiar + MySQL or Oracle database server. This means the code, + applications, and tools you already use today with your existing + MySQL or Oracle databases work with Amazon RDS without + modification. Amazon RDS automatically backs up your database and + maintains the database software that powers your DB instance. + Amazon RDS is flexible: you can scale your database instance's + compute resources and storage capacity to meet your application's + demand. As with all Amazon Web Services, there are no up-front + investments, and you pay only for the resources you use. + + This is the Amazon RDS API Reference . It contains a comprehensive + description of all Amazon RDS Query APIs and data types. Note that + this API is asynchronous and some actions may require polling to + determine when an action has been applied. See the parameter + description to determine if a change is applied immediately or on + the next instance reboot or during the maintenance window. For + more information on Amazon RDS concepts and usage scenarios, go to + the `Amazon RDS User Guide`_. 
+ """ + APIVersion = "2013-09-09" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "rds.us-east-1.amazonaws.com" + ResponseError = JSONResponseError + + _faults = { + "InvalidSubnet": exceptions.InvalidSubnet, + "DBParameterGroupQuotaExceeded": exceptions.DBParameterGroupQuotaExceeded, + "DBSubnetGroupAlreadyExists": exceptions.DBSubnetGroupAlreadyExists, + "DBSubnetGroupQuotaExceeded": exceptions.DBSubnetGroupQuotaExceeded, + "InstanceQuotaExceeded": exceptions.InstanceQuotaExceeded, + "InvalidRestore": exceptions.InvalidRestore, + "InvalidDBParameterGroupState": exceptions.InvalidDBParameterGroupState, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, + "DBSecurityGroupAlreadyExists": exceptions.DBSecurityGroupAlreadyExists, + "InsufficientDBInstanceCapacity": exceptions.InsufficientDBInstanceCapacity, + "ReservedDBInstanceQuotaExceeded": exceptions.ReservedDBInstanceQuotaExceeded, + "DBSecurityGroupNotFound": exceptions.DBSecurityGroupNotFound, + "DBInstanceAlreadyExists": exceptions.DBInstanceAlreadyExists, + "ReservedDBInstanceNotFound": exceptions.ReservedDBInstanceNotFound, + "DBSubnetGroupDoesNotCoverEnoughAZs": exceptions.DBSubnetGroupDoesNotCoverEnoughAZs, + "InvalidDBSecurityGroupState": exceptions.InvalidDBSecurityGroupState, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, + "ReservedDBInstancesOfferingNotFound": exceptions.ReservedDBInstancesOfferingNotFound, + "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, + "SNSNoAuthorization": exceptions.SNSNoAuthorization, + "SnapshotQuotaExceeded": exceptions.SnapshotQuotaExceeded, + "OptionGroupQuotaExceeded": exceptions.OptionGroupQuotaExceeded, + "DBParameterGroupNotFound": exceptions.DBParameterGroupNotFound, + "SNSInvalidTopic": exceptions.SNSInvalidTopic, + "InvalidDBSubnetGroupState": exceptions.InvalidDBSubnetGroupState, + "DBSubnetGroupNotFound": exceptions.DBSubnetGroupNotFound, + "InvalidOptionGroupState": exceptions.InvalidOptionGroupState, + "SourceNotFound": exceptions.SourceNotFound, + "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "DBSecurityGroupNotSupported": exceptions.DBSecurityGroupNotSupported, + "InvalidEventSubscriptionState": exceptions.InvalidEventSubscriptionState, + "InvalidDBSubnetState": exceptions.InvalidDBSubnetState, + "InvalidDBSnapshotState": exceptions.InvalidDBSnapshotState, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "DBSecurityGroupQuotaExceeded": exceptions.DBSecurityGroupQuotaExceeded, + "ProvisionedIopsNotAvailableInAZ": exceptions.ProvisionedIopsNotAvailableInAZ, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "OptionGroupAlreadyExists": exceptions.OptionGroupAlreadyExists, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "DBUpgradeDependencyFailure": exceptions.DBUpgradeDependencyFailure, + "PointInTimeRestoreNotEnabled": exceptions.PointInTimeRestoreNotEnabled, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "DBSubnetQuotaExceeded": exceptions.DBSubnetQuotaExceeded, + "OptionGroupNotFound": exceptions.OptionGroupNotFound, + "DBParameterGroupAlreadyExists": exceptions.DBParameterGroupAlreadyExists, + "DBInstanceNotFound": exceptions.DBInstanceNotFound, + "ReservedDBInstanceAlreadyExists": exceptions.ReservedDBInstanceAlreadyExists, + "InvalidDBInstanceState": exceptions.InvalidDBInstanceState, + "DBSnapshotNotFound": exceptions.DBSnapshotNotFound, + 
"DBSnapshotAlreadyExists": exceptions.DBSnapshotAlreadyExists, + "StorageQuotaExceeded": exceptions.StorageQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + super(RDSConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_source_identifier_to_subscription(self, subscription_name, + source_identifier): + """ + Adds a source identifier to an existing RDS event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to add a source identifier to. + + :type source_identifier: string + :param source_identifier: + The identifier of the event source to be added. An identifier must + begin with a letter and must contain only ASCII letters, digits, + and hyphens; it cannot end with a hyphen or contain two consecutive + hyphens. + + Constraints: + + + + If the source type is a DB instance, then a `DBInstanceIdentifier` + must be supplied. + + If the source type is a DB security group, a `DBSecurityGroupName` + must be supplied. + + If the source type is a DB parameter group, a `DBParameterGroupName` + must be supplied. + + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be + supplied. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='AddSourceIdentifierToSubscription', + verb='POST', + path='/', params=params) + + def add_tags_to_resource(self, resource_name, tags): + """ + Adds metadata tags to an Amazon RDS resource. These tags can + also be used with cost allocation reporting to track cost + associated with Amazon RDS resources, or used in Condition + statement in IAM policy for Amazon RDS. + + For an overview on tagging Amazon RDS resources, see `Tagging + Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be added + to. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tags: list + :param tags: The tags to be assigned to the Amazon RDS resource. + Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = {'ResourceName': resource_name, } + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='AddTagsToResource', + verb='POST', + path='/', params=params) + + def authorize_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Enables ingress to a DBSecurityGroup using one of two forms of + authorization. First, EC2 or VPC security groups can be added + to the DBSecurityGroup if the application using the database + is running on EC2 or VPC instances. Second, IP ranges are + available if the application accessing your database is + running on the Internet. 
Required parameters for this API are + one of CIDR range, EC2SecurityGroupId for VPC, or + (EC2SecurityGroupOwnerId and either EC2SecurityGroupName or + EC2SecurityGroupId for non-VPC). + You cannot authorize ingress from an EC2 security group in one + Region to an Amazon RDS DB instance in another. You cannot + authorize ingress from a VPC security group in one VPC to an + Amazon RDS DB instance in another. + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to add + authorization to. + + :type cidrip: string + :param cidrip: The IP range to authorize. + + :type ec2_security_group_name: string + :param ec2_security_group_name: Name of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: Id of the EC2 security group to + authorize. For VPC DB security groups, `EC2SecurityGroupId` must be + provided. Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: AWS Account Number of the owner of + the EC2 security group specified in the EC2SecurityGroupName + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. + + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='AuthorizeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def copy_db_snapshot(self, source_db_snapshot_identifier, + target_db_snapshot_identifier, tags=None): + """ + Copies the specified DBSnapshot. The source DBSnapshot must be + in the "available" state. + + :type source_db_snapshot_identifier: string + :param source_db_snapshot_identifier: The identifier for the source DB + snapshot. + Constraints: + + + + Must be the identifier for a valid system snapshot in the "available" + state. + + + Example: `rds:mydb-2012-04-02-00-01` + + :type target_db_snapshot_identifier: string + :param target_db_snapshot_identifier: The identifier for the copied + snapshot. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-db-snapshot` + + :type tags: list + :param tags: A list of tags. 
Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + """ + params = { + 'SourceDBSnapshotIdentifier': source_db_snapshot_identifier, + 'TargetDBSnapshotIdentifier': target_db_snapshot_identifier, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CopyDBSnapshot', + verb='POST', + path='/', params=params) + + def create_db_instance(self, db_instance_identifier, allocated_storage, + db_instance_class, engine, master_username, + master_user_password, db_name=None, + db_security_groups=None, + vpc_security_group_ids=None, + availability_zone=None, db_subnet_group_name=None, + preferred_maintenance_window=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, port=None, + multi_az=None, engine_version=None, + auto_minor_version_upgrade=None, + license_model=None, iops=None, + option_group_name=None, character_set_name=None, + publicly_accessible=None, tags=None): + """ + Creates a new DB instance. + + :type db_name: string + :param db_name: The meaning of this parameter differs according to the + database engine you use. + **MySQL** + + The name of the database to create when the DB instance is created. If + this parameter is not specified, no database is created in the DB + instance. + + Constraints: + + + + Must contain 1 to 64 alphanumeric characters + + Cannot be a word reserved by the specified database engine + + + Type: String + + **Oracle** + + The Oracle System ID (SID) of the created DB instance. + + Default: `ORCL` + + Constraints: + + + + Cannot be longer than 8 characters + + + **SQL Server** + + Not applicable. Must be null. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This + parameter is stored as a lowercase string. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + for SQL Server). + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + + Example: `mydbinstance` + + :type allocated_storage: integer + :param allocated_storage: The amount of storage (in gigabytes) to be + initially allocated for the database instance. + **MySQL** + + Constraints: Must be an integer from 5 to 1024. + + Type: Integer + + **Oracle** + + Constraints: Must be an integer from 10 to 1024. + + **SQL Server** + + Constraints: Must be an integer from 200 to 1024 (Standard Edition and + Enterprise Edition) or from 30 to 1024 (Express Edition and Web + Edition) + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the DB + instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + :type engine: string + :param engine: The name of the database engine to be used for this + instance. + Valid Values: `MySQL` | `oracle-se1` | `oracle-se` | `oracle-ee` | + `sqlserver-ee` | `sqlserver-se` | `sqlserver-ex` | `sqlserver-web` + + :type master_username: string + :param master_username: + The name of master user for the client DB instance. + + **MySQL** + + Constraints: + + + + Must be 1 to 16 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + Type: String + + **Oracle** + + Constraints: + + + + Must be 1 to 30 alphanumeric characters. 
+ + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + + **SQL Server** + + Constraints: + + + + Must be 1 to 128 alphanumeric characters. + + First character must be a letter. + + Cannot be a reserved word for the chosen database engine. + + :type master_user_password: string + :param master_user_password: The password for the master database user. + Can be any printable ASCII character except "/", '"', or "@". + Type: String + + **MySQL** + + Constraints: Must contain from 8 to 41 characters. + + **Oracle** + + Constraints: Must contain from 8 to 30 characters. + + **SQL Server** + + Constraints: Must contain from 8 to 128 characters. + + :type db_security_groups: list + :param db_security_groups: A list of DB security groups to associate + with this DB instance. + Default: The default DB security group for the database engine. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of EC2 VPC security groups to + associate with this DB instance. + Default: The default EC2 VPC security group for the DB subnet group's + VPC. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + Constraint: The AvailabilityZone parameter cannot be specified if the + MultiAZ parameter is set to `True`. The specified Availability Zone + must be in the same region as the current endpoint. + + :type db_subnet_group_name: string + :param db_subnet_group_name: A DB subnet group to associate with this + DB instance. + If there is no DB subnet group, then it is a non-VPC DB instance. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: A 30-minute window selected at random from an 8-hour block of + time per region, occurring on a random day of the week. To see the + time blocks available, see ` Adjusting the Preferred Maintenance + Window`_ in the Amazon RDS User Guide. + + Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + + Constraints: Minimum 30-minute window. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group to associate with this DB instance. + If this argument is omitted, the default DBParameterGroup for the + specified engine will be used. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days for which automated backups are retained. Setting + this parameter to a positive number enables backups. Setting this + parameter to 0 disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. 
See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. + + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + :type port: integer + :param port: The port number on which the database accepts connections. + **MySQL** + + Default: `3306` + + Valid Values: `1150-65535` + + Type: Integer + + **Oracle** + + Default: `1521` + + Valid Values: `1150-65535` + + **SQL Server** + + Default: `1433` + + Valid Values: `1150-65535` except for `1434` and `3389`. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + You cannot set the AvailabilityZone parameter if the MultiAZ + parameter is set to true. + + :type engine_version: string + :param engine_version: The version number of the database engine to + use. + **MySQL** + + Example: `5.1.42` + + Type: String + + **Oracle** + + Example: `11.2.0.2.v2` + + Type: String + + **SQL Server** + + Example: `10.50.2789.0.v1` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the DB instance during the + maintenance window. + Default: `True` + + :type license_model: string + :param license_model: License model information for this DB instance. + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type character_set_name: string + :param character_set_name: For supported engines, indicates that the DB + instance should be associated with the specified CharacterSet. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. 
Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'AllocatedStorage': allocated_storage, + 'DBInstanceClass': db_instance_class, + 'Engine': engine, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if port is not None: + params['Port'] = port + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if character_set_name is not None: + params['CharacterSetName'] = character_set_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstance', + verb='POST', + path='/', params=params) + + def create_db_instance_read_replica(self, db_instance_identifier, + source_db_instance_identifier, + db_instance_class=None, + availability_zone=None, port=None, + auto_minor_version_upgrade=None, + iops=None, option_group_name=None, + publicly_accessible=None, tags=None): + """ + Creates a DB instance that acts as a read replica of a source + DB instance. + + All read replica DB instances are created as Single-AZ + deployments with backups disabled. All other DB instance + attributes (including DB security groups and DB parameter + groups) are inherited from the source DB instance, except as + specified below. + + The source DB instance must have backup retention enabled. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier of the read + replica. This is the unique key that identifies a DB instance. This + parameter is stored as a lowercase string. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: The identifier of the DB instance + that will act as the source for the read replica. Each DB instance + can have up to five read replicas. + Constraints: Must be the identifier of an existing DB instance that is + not already a read replica DB instance. 
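+
+ Example (an illustrative sketch, not part of the upstream API docs;
+ assumes `conn` is a connected RDS client, e.g. from
+ `boto.rds2.connect_to_region`, and that `mydbinstance` exists)::
+
+     conn.create_db_instance_read_replica(
+         'mydbinstance-replica1',
+         'mydbinstance',
+         availability_zone='us-east-1d')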
+ + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the read + replica. + Valid Values: `db.m1.small | db.m1.medium | db.m1.large | db.m1.xlarge + | db.m2.xlarge |db.m2.2xlarge | db.m2.4xlarge` + + Default: Inherits from the source DB instance. + + :type availability_zone: string + :param availability_zone: The Amazon EC2 Availability Zone that the + read replica will be created in. + Default: A random, system-chosen Availability Zone in the endpoint's + region. + + Example: `us-east-1d` + + :type port: integer + :param port: The port number that the DB instance uses for connections. + Default: Inherits from the source DB instance + + Valid Values: `1150-65535` + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor engine upgrades + will be applied automatically to the read replica during the + maintenance window. + Default: Inherits from the source DB instance + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + + :type option_group_name: string + :param option_group_name: The option group the DB instance will be + associated with. If omitted, the default option group for the + engine specified will be used. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type tags: list + :param tags: A list of tags. Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if port is not None: + params['Port'] = port + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBInstanceReadReplica', + verb='POST', + path='/', params=params) + + def create_db_parameter_group(self, db_parameter_group_name, + db_parameter_group_family, description, + tags=None): + """ + Creates a new DB parameter group. 
+ + A DB parameter group is initially created with the default + parameters for the database engine used by the DB instance. To + provide custom values for any of the parameters, you must + modify the group after creating it using + ModifyDBParameterGroup . Once you've created a DB parameter + group, you need to associate it with your DB instance using + ModifyDBInstance . When you associate a new DB parameter group + with a running DB instance, you need to reboot the DB Instance + for the new DB parameter group and associated settings to take + effect. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + This value is stored as a lower-case string. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The DB parameter group family name. A + DB parameter group can be associated with one and only one DB + parameter group family, and can be applied only to a DB instance + running a database engine and engine version compatible with that + DB parameter group family. + + :type description: string + :param description: The description for the DB parameter group. + + :type tags: list + :param tags: A list of tags. Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'DBParameterGroupName': db_parameter_group_name, + 'DBParameterGroupFamily': db_parameter_group_family, + 'Description': description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBParameterGroup', + verb='POST', + path='/', params=params) + + def create_db_security_group(self, db_security_group_name, + db_security_group_description, tags=None): + """ + Creates a new DB security group. DB security groups control + access to a DB instance. + + :type db_security_group_name: string + :param db_security_group_name: The name for the DB security group. This + value is stored as a lowercase string. + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + Must not be "Default" + + May not contain spaces + + + Example: `mysecuritygroup` + + :type db_security_group_description: string + :param db_security_group_description: The description for the DB + security group. + + :type tags: list + :param tags: A list of tags. Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'DBSecurityGroupName': db_security_group_name, + 'DBSecurityGroupDescription': db_security_group_description, + } + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='CreateDBSecurityGroup', + verb='POST', + path='/', params=params) + + def create_db_snapshot(self, db_snapshot_identifier, + db_instance_identifier, tags=None): + """ + Creates a DBSnapshot. The source DBInstance must be in + "available" state. + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The identifier for the DB snapshot. 
+ Constraints:
+
+
+ + Cannot be null, empty, or blank
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-snapshot-id`
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier. This is the unique key that identifies a DB
+ instance. This parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type tags: list
+ :param tags: A list of tags. Tags must be passed as tuples in the form
+ [('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
+
+ """
+ params = {
+ 'DBSnapshotIdentifier': db_snapshot_identifier,
+ 'DBInstanceIdentifier': db_instance_identifier,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_db_subnet_group(self, db_subnet_group_name,
+ db_subnet_group_description, subnet_ids,
+ tags=None):
+ """
+ Creates a new DB subnet group. DB subnet groups must contain
+ at least one subnet in at least two AZs in the region.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name: The name for the DB subnet group. This
+ value is stored as a lowercase string.
+ Constraints: Must contain no more than 255 alphanumeric characters or
+ hyphens. Must not be "Default".
+
+ Example: `mySubnetgroup`
+
+ :type db_subnet_group_description: string
+ :param db_subnet_group_description: The description for the DB subnet
+ group.
+
+ :type subnet_ids: list
+ :param subnet_ids: The EC2 Subnet IDs for the DB subnet group.
+
+ :type tags: list
+ :param tags: A list of tags. Tags must be passed as tuples in the form
+ [('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
+
+ """
+ params = {
+ 'DBSubnetGroupName': db_subnet_group_name,
+ 'DBSubnetGroupDescription': db_subnet_group_description,
+ }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_event_subscription(self, subscription_name, sns_topic_arn,
+ source_type=None, event_categories=None,
+ source_ids=None, enabled=None, tags=None):
+ """
+ Creates an RDS event notification subscription. This action
+ requires a topic ARN (Amazon Resource Name) created by either
+ the RDS console, the SNS console, or the SNS API. To obtain an
+ ARN with SNS, you must create a topic in Amazon SNS and
+ subscribe to the topic. The ARN is displayed in the SNS
+ console.
+
+ You can specify the type of source (SourceType) you want to be
+ notified of, provide a list of RDS sources (SourceIds) that
+ trigger the events, and provide a list of event categories
+ (EventCategories) for events you want to be notified of. For
+ example, you can specify SourceType = db-instance, SourceIds =
+ mydbinstance1, mydbinstance2 and EventCategories =
+ Availability, Backup.
+
+ If you specify both the SourceType and SourceIds, such as
+ SourceType = db-instance and SourceIdentifier = myDBInstance1,
+ you will be notified of all the db-instance events for the
+ specified source.
If you specify a SourceType but do not
+ specify a SourceIdentifier, you will receive notice of the
+ events for that source type for all your RDS sources. If you
+ do not specify either the SourceType or the SourceIdentifier,
+ you will be notified of events generated from all RDS sources
+ belonging to your customer account.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the subscription.
+ Constraints: The name must be less than 255 characters.
+
+ :type sns_topic_arn: string
+ :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic
+ created for event notification. The ARN is created by Amazon SNS
+ when you create a topic and subscribe to it.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events. For example, if you want to be notified of events generated
+ by a DB instance, you would set this parameter to db-instance. If
+ this value is not specified, all events are returned.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ :type event_categories: list
+ :param event_categories: A list of event categories for a SourceType
+ that you want to subscribe to. You can see a list of the categories
+ for a given SourceType in the `Events`_ topic in the Amazon RDS
+ User Guide or by using the **DescribeEventCategories** action.
+
+ :type source_ids: list
+ :param source_ids:
+ The list of identifiers of the event sources for which events will be
+ returned. If not specified, then all sources are included in the
+ response. An identifier must begin with a letter and must contain
+ only ASCII letters, digits, and hyphens; it cannot end with a
+ hyphen or contain two consecutive hyphens.
+
+ Constraints:
+
+
+ + If SourceIds are supplied, SourceType must also be provided.
+ + If the source type is a DB instance, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is a DB security group, a `DBSecurityGroupName`
+ must be supplied.
+ + If the source type is a DB parameter group, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is a DB snapshot, a `DBSnapshotIdentifier` must be
+ supplied.
+
+ :type enabled: boolean
+ :param enabled: A Boolean value; set to **true** to activate the
+ subscription, set to **false** to create the subscription but not
+ activate it.
+
+ :type tags: list
+ :param tags: A list of tags. Tags must be passed as tuples in the form
+ [('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
+
+ """
+ params = {
+ 'SubscriptionName': subscription_name,
+ 'SnsTopicArn': sns_topic_arn,
+ }
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if source_ids is not None:
+ self.build_list_params(params,
+ source_ids,
+ 'SourceIds.member')
+ if enabled is not None:
+ params['Enabled'] = str(
+ enabled).lower()
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateEventSubscription',
+ verb='POST',
+ path='/', params=params)
+
+ def create_option_group(self, option_group_name, engine_name,
+ major_engine_version, option_group_description,
+ tags=None):
+ """
+ Creates a new option group. You can create up to 20 option
+ groups.
+
+ :type option_group_name: string
+ :param option_group_name: Specifies the name of the option group to be
+ created.
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `myoptiongroup`
+
+ :type engine_name: string
+ :param engine_name: Specifies the name of the engine that this option
+ group should be associated with.
+
+ :type major_engine_version: string
+ :param major_engine_version: Specifies the major version of the engine
+ that this option group should be associated with.
+
+ :type option_group_description: string
+ :param option_group_description: The description of the option group.
+
+ :type tags: list
+ :param tags: A list of tags. Tags must be passed as tuples in the form
+ [('key1', 'valueForKey1'), ('key2', 'valueForKey2')]
+
+ """
+ params = {
+ 'OptionGroupName': option_group_name,
+ 'EngineName': engine_name,
+ 'MajorEngineVersion': major_engine_version,
+ 'OptionGroupDescription': option_group_description,
+ }
+ if tags is not None:
+ self.build_complex_list_params(
+ params, tags,
+ 'Tags.member',
+ ('Key', 'Value'))
+ return self._make_request(
+ action='CreateOptionGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_instance(self, db_instance_identifier,
+ skip_final_snapshot=None,
+ final_db_snapshot_identifier=None):
+ """
+ The DeleteDBInstance action deletes a previously provisioned
+ DB instance. A successful response from the web service
+ indicates the request was received correctly. When you delete
+ a DB instance, all automated backups for that instance are
+ deleted and cannot be recovered. Manual DB snapshots of the DB
+ instance to be deleted are not deleted.
+
+ If a final DB snapshot is requested, the status of the RDS
+ instance will be "deleting" until the DB snapshot is created.
+ The API action `DescribeDBInstances` is used to monitor the
+ status of this operation. The action cannot be canceled or
+ reverted once submitted.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ The DB instance identifier for the DB instance to be deleted. This
+ parameter isn't case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type skip_final_snapshot: boolean
+ :param skip_final_snapshot: Determines whether a final DB snapshot is
+ created before the DB instance is deleted. If `True` is specified,
+ no DBSnapshot is created. If `False` is specified, a DB snapshot is
+ created before the DB instance is deleted.
+ The FinalDBSnapshotIdentifier parameter must be specified if
+ SkipFinalSnapshot is `False`.
+
+ Default: `False`
+
+ :type final_db_snapshot_identifier: string
+ :param final_db_snapshot_identifier:
+ The DBSnapshotIdentifier of the new DBSnapshot created when
+ SkipFinalSnapshot is set to `False`.
+
+ Specifying this parameter and also setting the SkipFinalSnapshot
+ parameter to `True` results in an error.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBInstanceIdentifier': db_instance_identifier, }
+ if skip_final_snapshot is not None:
+ params['SkipFinalSnapshot'] = str(
+ skip_final_snapshot).lower()
+ if final_db_snapshot_identifier is not None:
+ params['FinalDBSnapshotIdentifier'] = final_db_snapshot_identifier
+ return self._make_request(
+ action='DeleteDBInstance',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_parameter_group(self, db_parameter_group_name):
+ """
+ Deletes a specified DBParameterGroup. The DB parameter group
+ to be deleted cannot be associated with any DB instances.
+
+ :type db_parameter_group_name: string
+ :param db_parameter_group_name:
+ The name of the DB parameter group.
+
+ Constraints:
+
+
+ + Must be the name of an existing DB parameter group
+ + You cannot delete a default DB parameter group
+ + Cannot be associated with any DB instances
+
+ """
+ params = {'DBParameterGroupName': db_parameter_group_name, }
+ return self._make_request(
+ action='DeleteDBParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_security_group(self, db_security_group_name):
+ """
+ Deletes a DB security group.
+ The specified DB security group must not be associated with
+ any DB instances.
+
+ :type db_security_group_name: string
+ :param db_security_group_name:
+ The name of the DB security group to delete.
+
+ You cannot delete the default DB security group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + Must not be "Default"
+ + May not contain spaces
+
+ """
+ params = {'DBSecurityGroupName': db_security_group_name, }
+ return self._make_request(
+ action='DeleteDBSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_snapshot(self, db_snapshot_identifier):
+ """
+ Deletes a DBSnapshot.
+ The DBSnapshot must be in the `available` state to be deleted.
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier: The DBSnapshot identifier.
+ Constraints: Must be the name of an existing DB snapshot in the
+ `available` state.
+
+ """
+ params = {'DBSnapshotIdentifier': db_snapshot_identifier, }
+ return self._make_request(
+ action='DeleteDBSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_db_subnet_group(self, db_subnet_group_name):
+ """
+ Deletes a DB subnet group.
+ The specified database subnet group must not be associated
+ with any DB instances.
+
+ :type db_subnet_group_name: string
+ :param db_subnet_group_name:
+ The name of the database subnet group to delete.
+
+ You cannot delete the default subnet group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ """
+ params = {'DBSubnetGroupName': db_subnet_group_name, }
+ return self._make_request(
+ action='DeleteDBSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_event_subscription(self, subscription_name):
+ """
+ Deletes an RDS event notification subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to delete.
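+
+ A minimal sketch (the subscription name is illustrative; `conn` is
+ assumed to be a connected RDS client, e.g. from
+ `boto.rds2.connect_to_region`)::
+
+     conn.delete_event_subscription('my-rds-subscription')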
+ + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_option_group(self, option_group_name): + """ + Deletes an existing option group. + + :type option_group_name: string + :param option_group_name: + The name of the option group to be deleted. + + You cannot delete default option groups. + + """ + params = {'OptionGroupName': option_group_name, } + return self._make_request( + action='DeleteOptionGroup', + verb='POST', + path='/', params=params) + + def describe_db_engine_versions(self, engine=None, engine_version=None, + db_parameter_group_family=None, + max_records=None, marker=None, + default_only=None, + list_supported_character_sets=None): + """ + Returns a list of the available DB engines. + + :type engine: string + :param engine: The database engine to return. + + :type engine_version: string + :param engine_version: The database engine version to return. + Example: `5.1.49` + + :type db_parameter_group_family: string + :param db_parameter_group_family: + The name of a specific DB parameter group family to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + :type default_only: boolean + :param default_only: Indicates that only the default version of the + specified engine or engine and major version combination is + returned. + + :type list_supported_character_sets: boolean + :param list_supported_character_sets: If this parameter is specified, + and if the requested engine supports the CharacterSetName parameter + for CreateDBInstance, the response includes a list of supported + character sets for each engine version. + + """ + params = {} + if engine is not None: + params['Engine'] = engine + if engine_version is not None: + params['EngineVersion'] = engine_version + if db_parameter_group_family is not None: + params['DBParameterGroupFamily'] = db_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + if default_only is not None: + params['DefaultOnly'] = str( + default_only).lower() + if list_supported_character_sets is not None: + params['ListSupportedCharacterSets'] = str( + list_supported_character_sets).lower() + return self._make_request( + action='DescribeDBEngineVersions', + verb='POST', + path='/', params=params) + + def describe_db_instances(self, db_instance_identifier=None, + filters=None, max_records=None, marker=None): + """ + Returns information about provisioned RDS instances. This API + supports pagination. + + :type db_instance_identifier: string + :param db_instance_identifier: + The user-supplied instance identifier. If this parameter is specified, + information from only the specific DB instance is returned. This + parameter isn't case sensitive. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBInstances request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords` . + + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBInstances', + verb='POST', + path='/', params=params) + + def describe_db_log_files(self, db_instance_identifier, + filename_contains=None, file_last_written=None, + file_size=None, max_records=None, marker=None): + """ + Returns a list of DB log files for the DB instance. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to list. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filename_contains: string + :param filename_contains: Filters the available log files for log file + names that contain the specified string. + + :type file_last_written: long + :param file_last_written: Filters the available log files for files + written since the specified date, in POSIX timestamp format. + + :type file_size: long + :param file_size: Filters the available log files for files larger than + the specified size. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified MaxRecords + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified the response includes only records + beyond the marker, up to MaxRecords. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if filename_contains is not None: + params['FilenameContains'] = filename_contains + if file_last_written is not None: + params['FileLastWritten'] = file_last_written + if file_size is not None: + params['FileSize'] = file_size + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBLogFiles', + verb='POST', + path='/', params=params) + + def describe_db_parameter_groups(self, db_parameter_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBParameterGroup` descriptions. 
If a + `DBParameterGroupName` is specified, the list will contain + only the description of the specified DB parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameterGroups` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {} + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameterGroups', + verb='POST', + path='/', params=params) + + def describe_db_parameters(self, db_parameter_group_name, source=None, + max_records=None, marker=None): + """ + Returns the detailed parameter list for a particular DB + parameter group. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of a specific DB parameter group to return details for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type source: string + :param source: The parameter types to return. + Default: All parameter types returned + + Valid Values: `user | system | engine-default` + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeDBParameters` request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if source is not None: + params['Source'] = source + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBParameters', + verb='POST', + path='/', params=params) + + def describe_db_security_groups(self, db_security_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of `DBSecurityGroup` descriptions. If a + `DBSecurityGroupName` is specified, the list will contain only + the descriptions of the specified DB security group. 
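+
+ For example (an illustrative sketch; `conn` is assumed to be a
+ connected RDS client), fetching a single group by name::
+
+     conn.describe_db_security_groups(
+         db_security_group_name='mysecuritygroup')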
+
+ :type db_security_group_name: string
+ :param db_security_group_name: The name of the DB security group to
+ return details for.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeDBSecurityGroups request. If this parameter is specified,
+ the response includes only records beyond the marker, up to the
+ value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if db_security_group_name is not None:
+ params['DBSecurityGroupName'] = db_security_group_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDBSecurityGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_db_snapshots(self, db_instance_identifier=None,
+ db_snapshot_identifier=None,
+ snapshot_type=None, filters=None,
+ max_records=None, marker=None):
+ """
+ Returns information about DB snapshots. This API supports
+ pagination.
+
+ :type db_instance_identifier: string
+ :param db_instance_identifier:
+ A DB instance identifier to retrieve the list of DB snapshots for.
+ Cannot be used in conjunction with `DBSnapshotIdentifier`. This
+ parameter is not case sensitive.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type db_snapshot_identifier: string
+ :param db_snapshot_identifier:
+ A specific DB snapshot identifier to describe. Cannot be used in
+ conjunction with `DBInstanceIdentifier`. This value is stored as a
+ lowercase string.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+ + If this is the identifier of an automated snapshot, the
+ `SnapshotType` parameter must also be specified.
+
+ :type snapshot_type: string
+ :param snapshot_type: The type of snapshots that will be returned.
+ Values can be "automated" or "manual." If not specified, the
+ returned results will include all snapshot types.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ `DescribeDBSnapshots` request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
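+
+ Example (identifiers are illustrative; `conn` is assumed to be a
+ connected RDS client)::
+
+     # List only the manual snapshots of one instance.
+     conn.describe_db_snapshots(
+         db_instance_identifier='mydbinstance',
+         snapshot_type='manual')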
+ + """ + params = {} + if db_instance_identifier is not None: + params['DBInstanceIdentifier'] = db_instance_identifier + if db_snapshot_identifier is not None: + params['DBSnapshotIdentifier'] = db_snapshot_identifier + if snapshot_type is not None: + params['SnapshotType'] = snapshot_type + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSnapshots', + verb='POST', + path='/', params=params) + + def describe_db_subnet_groups(self, db_subnet_group_name=None, + filters=None, max_records=None, + marker=None): + """ + Returns a list of DBSubnetGroup descriptions. If a + DBSubnetGroupName is specified, the list will contain only the + descriptions of the specified DBSubnetGroup. + + For an overview of CIDR ranges, go to the `Wikipedia + Tutorial`_. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name of the DB subnet group to return + details for. + + :type filters: list + :param filters: + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeDBSubnetGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + """ + params = {} + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDBSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_engine_default_parameters(self, db_parameter_group_family, + max_records=None, marker=None): + """ + Returns the default engine and system parameter information + for the specified database engine. + + :type db_parameter_group_family: string + :param db_parameter_group_family: The name of the DB parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results may be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + `DescribeEngineDefaultParameters` request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. 
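+
+ Example (the parameter group family name is illustrative; `conn` is
+ assumed to be a connected RDS client)::
+
+     conn.describe_engine_default_parameters('mysql5.1')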
+
+ """
+ params = {
+ 'DBParameterGroupFamily': db_parameter_group_family,
+ }
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEngineDefaultParameters',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_event_categories(self, source_type=None):
+ """
+ Displays a list of categories for all event source types, or,
+ if a source type is specified, for that source type. You can
+ see a list of the event categories and source types in the
+ `Events`_ topic in the Amazon RDS User Guide.
+
+ :type source_type: string
+ :param source_type: The type of source that will be generating the
+ events.
+ Valid values: db-instance | db-parameter-group | db-security-group |
+ db-snapshot
+
+ """
+ params = {}
+ if source_type is not None:
+ params['SourceType'] = source_type
+ return self._make_request(
+ action='DescribeEventCategories',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_event_subscriptions(self, subscription_name=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Lists all the subscription descriptions for a customer
+ account. The description for a subscription includes
+ SubscriptionName, SNSTopicARN, CustomerID, SourceType,
+ SourceID, CreationTime, and Status.
+
+ If you specify a SubscriptionName, lists the description for
+ that subscription.
+
+ :type subscription_name: string
+ :param subscription_name: The name of the RDS event notification
+ subscription you want to describe.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeEventSubscriptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if subscription_name is not None:
+ params['SubscriptionName'] = subscription_name
+ if filters is not None:
+ self.build_complex_list_params(
+ params, filters,
+ 'Filters.member',
+ ('FilterName', 'FilterValue'))
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEventSubscriptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_events(self, source_identifier=None, source_type=None,
+ start_time=None, end_time=None, duration=None,
+ event_categories=None, max_records=None, marker=None):
+ """
+ Returns events related to DB instances, DB security groups, DB
+ snapshots, and DB parameter groups for the past 14 days.
+ Events specific to a particular DB instance, DB security
+ group, database snapshot, or DB parameter group can be
+ obtained by providing the name as a parameter. By default, the
+ past hour of events is returned.
+
+ :type source_identifier: string
+ :param source_identifier:
+ The identifier of the event source for which events will be returned.
+ If not specified, then all sources are included in the response.
+
+ Constraints:
+
+
+ + If SourceIdentifier is supplied, SourceType must also be provided.
+ + If the source type is `DBInstance`, then a `DBInstanceIdentifier`
+ must be supplied.
+ + If the source type is `DBSecurityGroup`, a `DBSecurityGroupName` must
+ be supplied.
+ + If the source type is `DBParameterGroup`, a `DBParameterGroupName`
+ must be supplied.
+ + If the source type is `DBSnapshot`, a `DBSnapshotIdentifier` must be
+ supplied.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type source_type: string
+ :param source_type: The event source to retrieve events for. If no
+ value is specified, all events are returned.
+
+ :type start_time: timestamp
+ :param start_time: The beginning of the time interval to retrieve
+ events for, specified in ISO 8601 format. For more information
+ about ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type end_time: timestamp
+ :param end_time: The end of the time interval for which to retrieve
+ events, specified in ISO 8601 format. For more information about
+ ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: 2009-07-08T18:00Z
+
+ :type duration: integer
+ :param duration: The number of minutes to retrieve events for.
+ Default: 60
+
+ :type event_categories: list
+ :param event_categories: A list of event categories that trigger
+ notifications for an event notification subscription.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results may be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeEvents request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if source_identifier is not None:
+ params['SourceIdentifier'] = source_identifier
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if duration is not None:
+ params['Duration'] = duration
+ if event_categories is not None:
+ self.build_list_params(params,
+ event_categories,
+ 'EventCategories.member')
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEvents',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_option_group_options(self, engine_name,
+ major_engine_version=None,
+ max_records=None, marker=None):
+ """
+ Describes all available options.
+
+ :type engine_name: string
+ :param engine_name: A required parameter. Options available for the
+ given Engine name will be described.
+
+ :type major_engine_version: string
+ :param major_engine_version: If specified, filters the results to
+ include only options for the specified major engine version.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ request.
If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {'EngineName': engine_name, } + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeOptionGroupOptions', + verb='POST', + path='/', params=params) + + def describe_option_groups(self, option_group_name=None, filters=None, + marker=None, max_records=None, + engine_name=None, major_engine_version=None): + """ + Describes the available option groups. + + :type option_group_name: string + :param option_group_name: The name of the option group to describe. + Cannot be supplied together with EngineName or MajorEngineVersion. + + :type filters: list + :param filters: + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOptionGroups request. If this parameter is specified, the + response includes only records beyond the marker, up to the value + specified by `MaxRecords`. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type engine_name: string + :param engine_name: Filters the list of option groups to only include + groups associated with a specific database engine. + + :type major_engine_version: string + :param major_engine_version: Filters the list of option groups to only + include groups associated with a specific database engine version. + If specified, then EngineName must also be specified. + + """ + params = {} + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if marker is not None: + params['Marker'] = marker + if max_records is not None: + params['MaxRecords'] = max_records + if engine_name is not None: + params['EngineName'] = engine_name + if major_engine_version is not None: + params['MajorEngineVersion'] = major_engine_version + return self._make_request( + action='DescribeOptionGroups', + verb='POST', + path='/', params=params) + + def describe_orderable_db_instance_options(self, engine, + engine_version=None, + db_instance_class=None, + license_model=None, vpc=None, + max_records=None, marker=None): + """ + Returns a list of orderable DB instance options for the + specified engine. + + :type engine: string + :param engine: The name of the engine to retrieve DB instance options + for. + + :type engine_version: string + :param engine_version: The engine version filter value. Specify this + parameter to show only the available offerings matching the + specified engine version. + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type license_model: string + :param license_model: The license model filter value. Specify this + parameter to show only the available offerings matching the + specified license model. 
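+
+ For example (values are illustrative; `conn` is assumed to be a
+ connected RDS client; the remaining filters are described below)::
+
+     conn.describe_orderable_db_instance_options(
+         'MySQL',
+         db_instance_class='db.m1.small',
+         license_model='general-public-license')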
+
+ :type vpc: boolean
+ :param vpc: The VPC filter value. Specify this parameter to show only
+ the available VPC or non-VPC offerings.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a pagination token called a marker is included in the
+ response so that the remaining results can be retrieved.
+ Default: 100
+
+ Constraints: minimum 20, maximum 100
+
+ :type marker: string
+ :param marker: An optional pagination token provided by a previous
+ DescribeOrderableDBInstanceOptions request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {'Engine': engine, }
+ if engine_version is not None:
+ params['EngineVersion'] = engine_version
+ if db_instance_class is not None:
+ params['DBInstanceClass'] = db_instance_class
+ if license_model is not None:
+ params['LicenseModel'] = license_model
+ if vpc is not None:
+ params['Vpc'] = str(
+ vpc).lower()
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeOrderableDBInstanceOptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_db_instances(self, reserved_db_instance_id=None,
+ reserved_db_instances_offering_id=None,
+ db_instance_class=None, duration=None,
+ product_description=None,
+ offering_type=None, multi_az=None,
+ filters=None, max_records=None,
+ marker=None):
+ """
+ Returns information about reserved DB instances for this
+ account, or about a specified reserved DB instance.
+
+ :type reserved_db_instance_id: string
+ :param reserved_db_instance_id: The reserved DB instance identifier
+ filter value. Specify this parameter to show only the reservation
+ that matches the specified reservation ID.
+
+ :type reserved_db_instances_offering_id: string
+ :param reserved_db_instances_offering_id: The offering identifier
+ filter value. Specify this parameter to show only purchased
+ reservations matching the specified offering identifier.
+
+ :type db_instance_class: string
+ :param db_instance_class: The DB instance class filter value. Specify
+ this parameter to show only those reservations matching the
+ specified DB instance class.
+
+ :type duration: string
+ :param duration: The duration filter value, specified in years or
+ seconds. Specify this parameter to show only reservations for this
+ duration.
+ Valid Values: `1 | 3 | 31536000 | 94608000`
+
+ :type product_description: string
+ :param product_description: The product description filter value.
+ Specify this parameter to show only those reservations matching the
+ specified product description.
+
+ :type offering_type: string
+ :param offering_type: The offering type filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified offering type.
+ Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy
+ Utilization" `
+
+ :type multi_az: boolean
+ :param multi_az: The Multi-AZ filter value. Specify this parameter to
+ show only those reservations matching the specified Multi-AZ
+ parameter.
+
+ :type filters: list
+ :param filters:
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response.
If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {} + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if filters is not None: + self.build_complex_list_params( + params, filters, + 'Filters.member', + ('FilterName', 'FilterValue')) + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstances', + verb='POST', + path='/', params=params) + + def describe_reserved_db_instances_offerings(self, + reserved_db_instances_offering_id=None, + db_instance_class=None, + duration=None, + product_description=None, + offering_type=None, + multi_az=None, + max_records=None, + marker=None): + """ + Lists available reserved DB instance offerings. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The offering identifier + filter value. Specify this parameter to show only the available + offering that matches the specified reservation identifier. + Example: `438012d3-4052-4cc7-b2e3-8d3372e0e706` + + :type db_instance_class: string + :param db_instance_class: The DB instance class filter value. Specify + this parameter to show only the available offerings matching the + specified DB instance class. + + :type duration: string + :param duration: Duration filter value, specified in years or seconds. + Specify this parameter to show only reservations for this duration. + Valid Values: `1 | 3 | 31536000 | 94608000` + + :type product_description: string + :param product_description: Product description filter value. Specify + this parameter to show only the available offerings matching the + specified product description. + + :type offering_type: string + :param offering_type: The offering type filter value. Specify this + parameter to show only the available offerings matching the + specified offering type. + Valid Values: `"Light Utilization" | "Medium Utilization" | "Heavy + Utilization" ` + + :type multi_az: boolean + :param multi_az: The Multi-AZ filter value. Specify this parameter to + show only the available offerings matching the specified Multi-AZ + parameter. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more than the `MaxRecords` value is available, a + pagination token called a marker is included in the response so + that the following results can be retrieved. 
+ + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + request. If this parameter is specified, the response includes only + records beyond the marker, up to the value specified by + `MaxRecords`. + + """ + params = {} + if reserved_db_instances_offering_id is not None: + params['ReservedDBInstancesOfferingId'] = reserved_db_instances_offering_id + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if duration is not None: + params['Duration'] = duration + if product_description is not None: + params['ProductDescription'] = product_description + if offering_type is not None: + params['OfferingType'] = offering_type + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeReservedDBInstancesOfferings', + verb='POST', + path='/', params=params) + + def download_db_log_file_portion(self, db_instance_identifier, + log_file_name, marker=None, + number_of_lines=None): + """ + Downloads all or a portion of the specified log file. + + :type db_instance_identifier: string + :param db_instance_identifier: + The customer-assigned name of the DB instance that contains the log + files you want to download. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type log_file_name: string + :param log_file_name: The name of the log file to be downloaded. + + :type marker: string + :param marker: The pagination token provided in the previous request. + If this parameter is specified, the response includes only records + beyond the marker, up to MaxRecords. + + :type number_of_lines: integer + :param number_of_lines: The number of lines remaining to be downloaded. + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'LogFileName': log_file_name, + } + if marker is not None: + params['Marker'] = marker + if number_of_lines is not None: + params['NumberOfLines'] = number_of_lines + return self._make_request( + action='DownloadDBLogFilePortion', + verb='POST', + path='/', params=params) + + def list_tags_for_resource(self, resource_name): + """ + Lists all tags on an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource with tags to be listed. + This value is an Amazon Resource Name (ARN). For information about + creating an ARN, see ` Constructing an RDS Amazon Resource Name + (ARN)`_. + + """ + params = {'ResourceName': resource_name, } + return self._make_request( + action='ListTagsForResource', + verb='POST', + path='/', params=params) + + def modify_db_instance(self, db_instance_identifier, + allocated_storage=None, db_instance_class=None, + db_security_groups=None, + vpc_security_group_ids=None, + apply_immediately=None, master_user_password=None, + db_parameter_group_name=None, + backup_retention_period=None, + preferred_backup_window=None, + preferred_maintenance_window=None, multi_az=None, + engine_version=None, + allow_major_version_upgrade=None, + auto_minor_version_upgrade=None, iops=None, + option_group_name=None, + new_db_instance_identifier=None): + """ + Modify settings for a DB instance.
You can change one or more + database configuration parameters by specifying these + parameters and the new values in the request. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This value is stored as a lowercase string. + + Constraints: + + + + Must be the identifier for an existing DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: integer + :param allocated_storage: The new storage capacity of the RDS instance. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + **MySQL** + + Default: Uses existing setting + + Valid Values: 5-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + **Oracle** + + Default: Uses existing setting + + Valid Values: 10-1024 + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + **SQL Server** + + Cannot be modified. + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type db_instance_class: string + :param db_instance_class: The new compute and memory capacity of the DB + instance. To determine the instance classes that are available for + a particular DB engine, use the DescribeOrderableDBInstanceOptions + action. + Passing a value for this parameter causes an outage during the change + and is applied during the next maintenance window, unless the + `ApplyImmediately` parameter is specified as `True` for this + request. + + Default: Uses existing setting + + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type db_security_groups: list + :param db_security_groups: + A list of DB security groups to authorize on this DB instance. Changing + this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. 
+ + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: + A list of EC2 VPC security groups to authorize on this DB instance. + This change is asynchronously applied as soon as possible. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type apply_immediately: boolean + :param apply_immediately: Specifies whether or not the modifications in + this request and any pending modifications are asynchronously + applied as soon as possible, regardless of the + `PreferredMaintenanceWindow` setting for the DB instance. + If this parameter is passed as `False`, changes to the DB instance are + applied on the next call to RebootDBInstance, the next maintenance + reboot, or the next failure reboot, whichever occurs first. See + each parameter to determine when a change is applied. + + Default: `False` + + :type master_user_password: string + :param master_user_password: + The new password for the DB instance master user. Can be any printable + ASCII character except "/", '"', or "@". + + Changing this parameter does not result in an outage and the change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting + + Constraints: Must be 8 to 41 alphanumeric characters (MySQL), 8 to 30 + alphanumeric characters (Oracle), or 8 to 128 alphanumeric + characters (SQL Server). + + Amazon RDS API actions never return the password, so this action + provides a way to regain access to a master instance user if the + password is lost. + + :type db_parameter_group_name: string + :param db_parameter_group_name: The name of the DB parameter group to + apply to this DB instance. Changing this parameter does not result + in an outage and the change is applied during the next maintenance + window unless the `ApplyImmediately` parameter is set to `True` for + this request. + Default: Uses existing setting + + Constraints: The DB parameter group must be in the same DB parameter + group family as this DB instance. + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Changing this parameter can result in an outage if you change from 0 to + a non-zero value or from a non-zero value to 0. These changes are + applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + you change the parameter from one non-zero value to another non- + zero value, the change is asynchronously applied as soon as + possible. + + Default: Uses existing setting + + Constraints: + + + + Must be a value from 0 to 8 + + Cannot be set to 0 if the DB instance is a master instance with read + replicas or if the DB instance is a read replica + + :type preferred_backup_window: string + :param preferred_backup_window: + The daily time range during which automated backups are created if + automated backups are enabled, as determined by the + `BackupRetentionPeriod`. 
Changing this parameter does not result in + an outage and the change is asynchronously applied as soon as + possible. + + Constraints: + + + + Must be in the format hh24:mi-hh24:mi + + Times should be Universal Time Coordinated (UTC) + + Must not conflict with the preferred maintenance window + + Must be at least 30 minutes + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, which may result in an + outage. Changing this parameter does not result in an outage, + except in the following situation, and the change is asynchronously + applied as soon as possible. If there are pending actions that + cause a reboot, and the maintenance window is changed to include + the current time, then changing this parameter will cause a reboot + of the DB instance. If moving this window to the current time, + there must be at least 30 minutes between the current time and end + of the window to ensure pending changes are applied. + Default: Uses existing setting + + Format: ddd:hh24:mi-ddd:hh24:mi + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Changing this parameter does not result in an outage and the change + is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + Constraints: Cannot be specified if the DB instance is a read replica. + + :type engine_version: string + :param engine_version: The version number of the database engine to + upgrade to. Changing this parameter results in an outage and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. + For major version upgrades, if a non-default DB parameter group is + currently in use, a new DB parameter group in the DB parameter + group family for the new engine version must be specified. The new + DB parameter group can be the default for that DB parameter group + family. + + Example: `5.1.42` + + :type allow_major_version_upgrade: boolean + :param allow_major_version_upgrade: Indicates that major version + upgrades are allowed. Changing this parameter does not result in an + outage and the change is asynchronously applied as soon as + possible. + Constraints: This parameter must be set to true when specifying a value + for the EngineVersion parameter that is a different major version + than the DB instance's current version. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. Changing this parameter does not result in + an outage except in the following case and the change is + asynchronously applied as soon as possible. An outage will result + if this parameter is set to `True` during the maintenance window, + and a newer minor version is available, and RDS has enabled auto + patching for that engine version. + + :type iops: integer + :param iops: The new Provisioned IOPS (I/O operations per second) value + for the RDS instance. Changing this parameter does not result in an + outage and the change is applied during the next maintenance window + unless the `ApplyImmediately` parameter is set to `True` for this + request. 
+ Default: Uses existing setting + + Constraints: Value supplied must be at least 10% greater than the + current value. Values that are not at least 10% greater than the + existing value are rounded up so that they are 10% greater than the + current value. + + Type: Integer + + If you choose to migrate your DB instance from using standard storage + to using Provisioned IOPS, or from using Provisioned IOPS to using + standard storage, the process can take time. The duration of the + migration depends on several factors such as database load, storage + size, storage type (standard or Provisioned IOPS), amount of IOPS + provisioned (if any), and the number of prior scale storage + operations. Typical migration times are under 24 hours, but the + process can take up to several days in some cases. During the + migration, the DB instance will be available for use, but may + experience performance degradation. While the migration takes + place, nightly backups for the instance will be suspended. No other + Amazon RDS operations can take place for the instance, including + modifying the instance, rebooting the instance, deleting the + instance, creating a read replica for the instance, and creating a + DB snapshot of the instance. + + :type option_group_name: string + :param option_group_name: Indicates that the DB instance should be + associated with the specified option group. Changing this parameter + does not result in an outage except in the following case and the + change is applied during the next maintenance window unless the + `ApplyImmediately` parameter is set to `True` for this request. If + the parameter change results in an option group that enables OEM, + this change can cause a brief (sub-second) period during which new + connections are rejected but existing connections are not + interrupted. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type new_db_instance_identifier: string + :param new_db_instance_identifier: + The new DB instance identifier for the DB instance when renaming a DB + Instance. This value is stored as a lowercase string. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if allocated_storage is not None: + params['AllocatedStorage'] = allocated_storage + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if db_security_groups is not None: + self.build_list_params(params, + db_security_groups, + 'DBSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if db_parameter_group_name is not None: + params['DBParameterGroupName'] = db_parameter_group_name + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if engine_version is not None: + params['EngineVersion'] = engine_version + if allow_major_version_upgrade is not None: + params['AllowMajorVersionUpgrade'] = str( + allow_major_version_upgrade).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if new_db_instance_identifier is not None: + params['NewDBInstanceIdentifier'] = new_db_instance_identifier + return self._make_request( + action='ModifyDBInstance', + verb='POST', + path='/', params=params) + + def modify_db_parameter_group(self, db_parameter_group_name, parameters): + """ + Modifies the parameters of a DB parameter group. To modify + more than one parameter, submit a list of the following: + `ParameterName`, `ParameterValue`, and `ApplyMethod`. A + maximum of 20 parameters can be modified in a single request. + + The `immediate` apply method can be used only for dynamic + parameters; the `pending-reboot` method can be used with MySQL + and Oracle DB instances for either dynamic or static + parameters. For Microsoft SQL Server DB instances, the + `pending-reboot` method can be used only for static + parameters. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be the name of an existing DB parameter group + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type parameters: list + :param parameters: + An array of parameter names, values, and the apply method for the + parameter update. At least one parameter name, value, and apply + method must be supplied; subsequent arguments are optional. A + maximum of 20 parameters may be modified in a single request. + + Valid Values (for the application method): `immediate | pending-reboot` + + You can use the immediate value with dynamic parameters only.
You can + use the pending-reboot value for both dynamic and static + parameters, and changes are applied when the DB instance reboots. + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) + return self._make_request( + action='ModifyDBParameterGroup', + verb='POST', + path='/', params=params) + + def modify_db_subnet_group(self, db_subnet_group_name, subnet_ids, + db_subnet_group_description=None): + """ + Modifies an existing DB subnet group. DB subnet groups must + contain at least one subnet in at least two AZs in the region. + + :type db_subnet_group_name: string + :param db_subnet_group_name: The name for the DB subnet group. This + value is stored as a lowercase string. + Constraints: Must contain no more than 255 alphanumeric characters or + hyphens. Must not be "Default". + + Example: `mySubnetgroup` + + :type db_subnet_group_description: string + :param db_subnet_group_description: The description for the DB subnet + group. + + :type subnet_ids: list + :param subnet_ids: The EC2 subnet IDs for the DB subnet group. + + """ + params = {'DBSubnetGroupName': db_subnet_group_name, } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if db_subnet_group_description is not None: + params['DBSubnetGroupDescription'] = db_subnet_group_description + return self._make_request( + action='ModifyDBSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + event_categories=None, enabled=None): + """ + Modifies an existing RDS event notification subscription. Note + that you cannot modify the source identifiers using this call; + to change source identifiers for a subscription, use the + AddSourceIdentifierToSubscription and + RemoveSourceIdentifierFromSubscription calls. + + You can see a list of the event categories for a given + SourceType in the `Events`_ topic in the Amazon RDS User Guide + or by using the **DescribeEventCategories** action. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + created for event notification. The ARN is created by Amazon SNS + when you create a topic and subscribe to it. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a DB instance, you would set this parameter to db-instance. If + this value is not specified, all events are returned. + Valid values: db-instance | db-parameter-group | db-security-group | + db-snapshot + + :type event_categories: list + :param event_categories: A list of event categories for a SourceType + that you want to subscribe to. You can see a list of the categories + for a given SourceType in the `Events`_ topic in the Amazon RDS + User Guide or by using the **DescribeEventCategories** action. + + :type enabled: boolean + :param enabled: A Boolean value; set to **true** to activate the + subscription.
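+ + A minimal usage sketch; the subscription name and topic ARN below are + hypothetical, and the import path assumes this vendored copy is exposed + as `boto.rds2`: + + >>> import boto.rds2 + >>> conn = boto.rds2.connect_to_region('us-east-1') + >>> conn.modify_event_subscription( + ... 'my-subscription', + ... sns_topic_arn='arn:aws:sns:us-east-1:123456789012:my-topic', + ... source_type='db-instance', + ... enabled=True)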
+ + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_option_group(self, option_group_name, options_to_include=None, + options_to_remove=None, apply_immediately=None): + """ + Modifies an existing option group. + + :type option_group_name: string + :param option_group_name: The name of the option group to be modified. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type options_to_include: list + :param options_to_include: Options in this list are added to the option + group or, if already present, the specified configuration is used + to update the existing configuration. + + :type options_to_remove: list + :param options_to_remove: Options in this list are removed from the + option group. + + :type apply_immediately: boolean + :param apply_immediately: Indicates whether the changes should be + applied immediately, or during the next maintenance window for each + instance associated with the option group. + + """ + params = {'OptionGroupName': option_group_name, } + if options_to_include is not None: + self.build_complex_list_params( + params, options_to_include, + 'OptionsToInclude.member', + ('OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')) + if options_to_remove is not None: + self.build_list_params(params, + options_to_remove, + 'OptionsToRemove.member') + if apply_immediately is not None: + params['ApplyImmediately'] = str( + apply_immediately).lower() + return self._make_request( + action='ModifyOptionGroup', + verb='POST', + path='/', params=params) + + def promote_read_replica(self, db_instance_identifier, + backup_retention_period=None, + preferred_backup_window=None): + """ + Promotes a read replica DB instance to a standalone DB + instance. + + :type db_instance_identifier: string + :param db_instance_identifier: The DB instance identifier. This value + is stored as a lowercase string. + Constraints: + + + + Must be the identifier for an existing read replica DB instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: mydbinstance + + :type backup_retention_period: integer + :param backup_retention_period: + The number of days to retain automated backups. Setting this parameter + to a positive number enables backups. Setting this parameter to 0 + disables automated backups. + + Default: 1 + + Constraints: + + + + Must be a value from 0 to 8 + + :type preferred_backup_window: string + :param preferred_backup_window: The daily time range during which + automated backups are created if automated backups are enabled, + using the `BackupRetentionPeriod` parameter. + Default: A 30-minute window selected at random from an 8-hour block of + time per region. See the Amazon RDS User Guide for the time blocks + for each region from which the default backup windows are assigned. 
+ + Constraints: Must be in the format `hh24:mi-hh24:mi`. Times should be + Universal Time Coordinated (UTC). Must not conflict with the + preferred maintenance window. Must be at least 30 minutes. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if backup_retention_period is not None: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window is not None: + params['PreferredBackupWindow'] = preferred_backup_window + return self._make_request( + action='PromoteReadReplica', + verb='POST', + path='/', params=params) + + def purchase_reserved_db_instances_offering(self, + reserved_db_instances_offering_id, + reserved_db_instance_id=None, + db_instance_count=None, + tags=None): + """ + Purchases a reserved DB instance offering. + + :type reserved_db_instances_offering_id: string + :param reserved_db_instances_offering_id: The ID of the Reserved DB + instance offering to purchase. + Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + + :type reserved_db_instance_id: string + :param reserved_db_instance_id: Customer-specified identifier to track + this reservation. + Example: myreservationID + + :type db_instance_count: integer + :param db_instance_count: The number of instances to reserve. + Default: `1` + + :type tags: list + :param tags: A list of tags. Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'ReservedDBInstancesOfferingId': reserved_db_instances_offering_id, + } + if reserved_db_instance_id is not None: + params['ReservedDBInstanceId'] = reserved_db_instance_id + if db_instance_count is not None: + params['DBInstanceCount'] = db_instance_count + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='PurchaseReservedDBInstancesOffering', + verb='POST', + path='/', params=params) + + def reboot_db_instance(self, db_instance_identifier, force_failover=None): + """ + Rebooting a DB instance restarts the database engine service. + A reboot also applies to the DB instance any modifications to + the associated DB parameter group that were pending. Rebooting + a DB instance results in a momentary outage of the instance, + during which the DB instance status is set to rebooting. If + the RDS instance is configured for MultiAZ, it is possible + that the reboot will be conducted through a failover. An + Amazon RDS event is created when the reboot is completed. + + If your DB instance is deployed in multiple Availability + Zones, you can force a failover from one AZ to the other + during the reboot. You might force a failover to test the + availability of your DB instance deployment or to restore + operations to the original AZ after a failover occurs. + + The time required to reboot is a function of the specific + database engine's crash recovery process. To improve the + reboot time, we recommend that you reduce database activities + as much as possible during the reboot process to reduce + rollback activity for in-transit transactions. + + :type db_instance_identifier: string + :param db_instance_identifier: + The DB instance identifier. This parameter is stored as a lowercase + string. 
+ + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type force_failover: boolean + :param force_failover: When `True`, the reboot will be conducted + through a MultiAZ failover. + Constraint: You cannot specify `True` if the instance is not configured + for MultiAZ. + + """ + params = {'DBInstanceIdentifier': db_instance_identifier, } + if force_failover is not None: + params['ForceFailover'] = str( + force_failover).lower() + return self._make_request( + action='RebootDBInstance', + verb='POST', + path='/', params=params) + + def remove_source_identifier_from_subscription(self, subscription_name, + source_identifier): + """ + Removes a source identifier from an existing RDS event + notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the RDS event notification + subscription you want to remove a source identifier from. + + :type source_identifier: string + :param source_identifier: The source identifier to be removed from the + subscription, such as the **DB instance identifier** for a DB + instance or the name of a security group. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SourceIdentifier': source_identifier, + } + return self._make_request( + action='RemoveSourceIdentifierFromSubscription', + verb='POST', + path='/', params=params) + + def remove_tags_from_resource(self, resource_name, tag_keys): + """ + Removes metadata tags from an Amazon RDS resource. + + For an overview on tagging an Amazon RDS resource, see + `Tagging Amazon RDS Resources`_. + + :type resource_name: string + :param resource_name: The Amazon RDS resource the tags will be removed + from. This value is an Amazon Resource Name (ARN). For information + about creating an ARN, see ` Constructing an RDS Amazon Resource + Name (ARN)`_. + + :type tag_keys: list + :param tag_keys: The tag key (name) of the tag to be removed. + + """ + params = {'ResourceName': resource_name, } + self.build_list_params(params, + tag_keys, + 'TagKeys.member') + return self._make_request( + action='RemoveTagsFromResource', + verb='POST', + path='/', params=params) + + def reset_db_parameter_group(self, db_parameter_group_name, + reset_all_parameters=None, parameters=None): + """ + Modifies the parameters of a DB parameter group to the + engine/system default value. To reset specific parameters + submit a list of the following: `ParameterName` and + `ApplyMethod`. To reset the entire DB parameter group, specify + the `DBParameterGroup` name and `ResetAllParameters` + parameters. When resetting the entire group, dynamic + parameters are updated immediately and static parameters are + set to `pending-reboot` to take effect on the next DB instance + restart or `RebootDBInstance` request. + + :type db_parameter_group_name: string + :param db_parameter_group_name: + The name of the DB parameter group. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type reset_all_parameters: boolean + :param reset_all_parameters: Specifies whether ( `True`) or not ( + `False`) to reset all parameters in the DB parameter group to + default values. + Default: `True` + + :type parameters: list + :param parameters: An array of parameter names, values, and the apply + method for the parameter update. 
At least one parameter name, + value, and apply method must be supplied; subsequent arguments are + optional. A maximum of 20 parameters may be modified in a single + request. + **MySQL** + + Valid Values (for Apply method): `immediate` | `pending-reboot` + + You can use the immediate value with dynamic parameters only. You can + use the `pending-reboot` value for both dynamic and static + parameters, and changes are applied when the DB instance reboots. + + **Oracle** + + Valid Values (for Apply method): `pending-reboot` + + """ + params = {'DBParameterGroupName': db_parameter_group_name, } + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'ApplyType', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion', 'ApplyMethod')) + return self._make_request( + action='ResetDBParameterGroup', + verb='POST', + path='/', params=params) + + def restore_db_instance_from_db_snapshot(self, db_instance_identifier, + db_snapshot_identifier, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + tags=None): + """ + Creates a new DB instance from a DB snapshot. The target + database is created from the source database restore point + with the same configuration as the original source database, + except that the new RDS instance is created with the default + security group. + + :type db_instance_identifier: string + :param db_instance_identifier: + Name of the DB instance to create from the DB snapshot. This + parameter isn't case sensitive. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type db_snapshot_identifier: string + :param db_snapshot_identifier: The identifier for the DB snapshot to + restore from. + Constraints: + + + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + :type port: integer + :param port: The port number on which the database accepts connections. + Default: The same port as the original DB instance + + Constraints: Value must be `1150-65535` + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`.
+ + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter doesn't apply to the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: Specifies the amount of provisioned IOPS for the DB + instance, expressed in I/O operations per second. If this parameter + is not specified, the IOPS value will be taken from the backup. If + this parameter is set to 0, the new instance will be converted to a + non-PIOPS instance, which will take additional time, though your DB + instance will be available for connections before the conversion + starts. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'DBInstanceIdentifier': db_instance_identifier, + 'DBSnapshotIdentifier': db_snapshot_identifier, + } + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceFromDBSnapshot', + verb='POST', + path='/', params=params) + + def restore_db_instance_to_point_in_time(self, + source_db_instance_identifier, + target_db_instance_identifier, + restore_time=None, + use_latest_restorable_time=None, + db_instance_class=None, + port=None, + availability_zone=None, + db_subnet_group_name=None, + multi_az=None, + publicly_accessible=None, + auto_minor_version_upgrade=None, + license_model=None, + db_name=None, engine=None, + iops=None, + option_group_name=None, + tags=None): + """ + Restores a DB instance to an arbitrary point-in-time. Users + can restore to any point in time before the + latestRestorableTime for up to backupRetentionPeriod days. The + target database is created from the source database with the + same configuration as the original database except that the DB + instance is created with the default DB security group. + + :type source_db_instance_identifier: string + :param source_db_instance_identifier: + The identifier of the source DB instance from which to restore. + + Constraints: + + + + Must be the identifier of an existing database instance + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type target_db_instance_identifier: string + :param target_db_instance_identifier: + The name of the new database instance to be created. + + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type restore_time: timestamp + :param restore_time: The date and time to restore from. + Valid Values: Value must be a UTC time + + Constraints: + + + + Must be before the latest restorable time for the DB instance + + Cannot be specified if UseLatestRestorableTime parameter is true + + + Example: `2009-09-07T23:45:00Z` + + :type use_latest_restorable_time: boolean + :param use_latest_restorable_time: Specifies whether ( `True`) or not ( + `False`) the DB instance is restored from the latest backup time. + Default: `False` + + Constraints: Cannot be specified if RestoreTime parameter is provided. 
+ + :type db_instance_class: string + :param db_instance_class: The compute and memory capacity of the Amazon + RDS DB instance. + Valid Values: `db.t1.micro | db.m1.small | db.m1.medium | db.m1.large | + db.m1.xlarge | db.m2.2xlarge | db.m2.4xlarge` + + Default: The same DBInstanceClass as the original DB instance. + + :type port: integer + :param port: The port number on which the database accepts connections. + Constraints: Value must be `1150-65535` + + Default: The same port as the original DB instance. + + :type availability_zone: string + :param availability_zone: The EC2 Availability Zone that the database + instance will be created in. + Default: A random, system-chosen Availability Zone. + + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to true. + + Example: `us-east-1a` + + :type db_subnet_group_name: string + :param db_subnet_group_name: The DB subnet group name to use for the + new instance. + + :type multi_az: boolean + :param multi_az: Specifies if the DB instance is a Multi-AZ deployment. + Constraint: You cannot specify the AvailabilityZone parameter if the + MultiAZ parameter is set to `True`. + + :type publicly_accessible: boolean + :param publicly_accessible: Specifies the accessibility options for the + DB instance. A value of true specifies an Internet-facing instance + with a publicly resolvable DNS name, which resolves to a public IP + address. A value of false specifies an internal instance with a DNS + name that resolves to a private IP address. + Default: The default behavior varies depending on whether a VPC has + been requested or not. The following list shows the default + behavior in each case. + + + + **Default VPC:**true + + **VPC:**false + + + If no DB subnet group has been specified as part of the request and the + PubliclyAccessible value has not been set, the DB instance will be + publicly accessible. If a specific DB subnet group has been + specified as part of the request and the PubliclyAccessible value + has not been set, the DB instance will be private. + + :type auto_minor_version_upgrade: boolean + :param auto_minor_version_upgrade: Indicates that minor version + upgrades will be applied automatically to the DB instance during + the maintenance window. + + :type license_model: string + :param license_model: License model information for the restored DB + instance. + Default: Same as source. + + Valid values: `license-included` | `bring-your-own-license` | `general- + public-license` + + :type db_name: string + :param db_name: + The database name for the restored DB instance. + + + This parameter is not used for the MySQL engine. + + :type engine: string + :param engine: The database engine to use for the new instance. + Default: The same as source + + Constraint: Must be compatible with the engine of the source + + Example: `oracle-ee` + + :type iops: integer + :param iops: The amount of Provisioned IOPS (input/output operations + per second) to be initially allocated for the DB instance. + Constraints: Must be an integer greater than 1000. + + :type option_group_name: string + :param option_group_name: The name of the option group to be used for + the restored DB instance. + Permanent options, such as the TDE option for Oracle Advanced Security + TDE, cannot be removed from an option group, and that option group + cannot be removed from a DB instance once it is associated with a + DB instance + + :type tags: list + :param tags: A list of tags. 
Tags must be passed as tuples in the form + [('key1', 'valueForKey1'), ('key2', 'valueForKey2')] + + """ + params = { + 'SourceDBInstanceIdentifier': source_db_instance_identifier, + 'TargetDBInstanceIdentifier': target_db_instance_identifier, + } + if restore_time is not None: + params['RestoreTime'] = restore_time + if use_latest_restorable_time is not None: + params['UseLatestRestorableTime'] = str( + use_latest_restorable_time).lower() + if db_instance_class is not None: + params['DBInstanceClass'] = db_instance_class + if port is not None: + params['Port'] = port + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if db_subnet_group_name is not None: + params['DBSubnetGroupName'] = db_subnet_group_name + if multi_az is not None: + params['MultiAZ'] = str( + multi_az).lower() + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if auto_minor_version_upgrade is not None: + params['AutoMinorVersionUpgrade'] = str( + auto_minor_version_upgrade).lower() + if license_model is not None: + params['LicenseModel'] = license_model + if db_name is not None: + params['DBName'] = db_name + if engine is not None: + params['Engine'] = engine + if iops is not None: + params['Iops'] = iops + if option_group_name is not None: + params['OptionGroupName'] = option_group_name + if tags is not None: + self.build_complex_list_params( + params, tags, + 'Tags.member', + ('Key', 'Value')) + return self._make_request( + action='RestoreDBInstanceToPointInTime', + verb='POST', + path='/', params=params) + + def revoke_db_security_group_ingress(self, db_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_id=None, + ec2_security_group_owner_id=None): + """ + Revokes ingress from a DBSecurityGroup for previously + authorized IP ranges or EC2 or VPC Security Groups. Required + parameters for this API are one of CIDRIP, EC2SecurityGroupId + for VPC, or (EC2SecurityGroupOwnerId and either + EC2SecurityGroupName or EC2SecurityGroupId). + + :type db_security_group_name: string + :param db_security_group_name: The name of the DB security group to + revoke ingress from. + + :type cidrip: string + :param cidrip: The IP range to revoke access from. Must be a valid CIDR + range. If `CIDRIP` is specified, `EC2SecurityGroupName`, + `EC2SecurityGroupId` and `EC2SecurityGroupOwnerId` cannot be + provided. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_id: string + :param ec2_security_group_id: The id of the EC2 security group to + revoke access from. For VPC DB security groups, + `EC2SecurityGroupId` must be provided. Otherwise, + EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or + `EC2SecurityGroupId` must be provided. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The AWS Account Number of the owner + of the EC2 security group specified in the `EC2SecurityGroupName` + parameter. The AWS Access Key ID is not an acceptable value. For + VPC DB security groups, `EC2SecurityGroupId` must be provided. + Otherwise, EC2SecurityGroupOwnerId and either + `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided. 
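+ + A minimal usage sketch; the group name and CIDR range below are + hypothetical, and the import path assumes this vendored copy is exposed + as `boto.rds2`. Only `CIDRIP` is passed here, so per the constraints + above none of the EC2 security group parameters may be supplied: + + >>> import boto.rds2 + >>> conn = boto.rds2.connect_to_region('us-east-1') + >>> conn.revoke_db_security_group_ingress( + ... 'mydbsecuritygroup', cidrip='203.0.113.0/24')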
+ + """ + params = {'DBSecurityGroupName': db_security_group_name, } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_id is not None: + params['EC2SecurityGroupId'] = ec2_security_group_id + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeDBSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/redshift/__init__.py b/ext/boto/redshift/__init__.py new file mode 100644 index 0000000000..bdc4ade81c --- /dev/null +++ b/ext/boto/redshift/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the AWS Redshift service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.redshift.layer1 import RedshiftConnection + return get_regions('redshift', connection_cls=RedshiftConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.redshift.layer1 import RedshiftConnection + return connect('redshift', region_name, + connection_cls=RedshiftConnection, **kw_params) diff --git a/ext/boto/redshift/exceptions.py b/ext/boto/redshift/exceptions.py new file mode 100644 index 0000000000..70339225ff --- /dev/null +++ b/ext/boto/redshift/exceptions.py @@ -0,0 +1,459 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class ClusterNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSnapshotStateFault(JSONResponseError): + pass + + +class ClusterSnapshotNotFoundFault(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFoundFault(JSONResponseError): + pass + + +class InvalidSubnet(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceededFault(JSONResponseError): + pass + + +class InvalidClusterStateFault(JSONResponseError): + pass + + +class InvalidClusterParameterGroupStateFault(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExistsFault(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupStateFault(JSONResponseError): + pass + + +class InvalidRestoreFault(JSONResponseError): + pass + + +class AuthorizationNotFoundFault(JSONResponseError): + pass + + +class ResizeNotFoundFault(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExistsFault(JSONResponseError): + pass + + +class AuthorizationQuotaExceededFault(JSONResponseError): + pass + + +class AuthorizationAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceededFault(JSONResponseError): + pass + + +class ReservedNodeNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError): + pass + + +class ClusterParameterGroupNotFoundFault(JSONResponseError): + pass + + +class ReservedNodeQuotaExceededFault(JSONResponseError): + pass + + +class ClusterQuotaExceededFault(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceededFault(JSONResponseError): + pass + + +class UnsupportedOptionFault(JSONResponseError): + pass + + +class InvalidVPCNetworkStateFault(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFoundFault(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupStateFault(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFoundFault(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceededFault(JSONResponseError): + pass + + +class ClusterAlreadyExistsFault(JSONResponseError): + 
pass + + +class InsufficientClusterCapacityFault(JSONResponseError): + pass + + +class InvalidClusterSubnetStateFault(JSONResponseError): + pass + + +class SubnetAlreadyInUse(JSONResponseError): + pass + + +class InvalidParameterCombinationFault(JSONResponseError): + pass + + +class AccessToSnapshotDeniedFault(JSONResponseError): + pass + + +class UnauthorizedOperationFault(JSONResponseError): + pass + + +class SnapshotCopyAlreadyDisabled(JSONResponseError): + pass + + +class ClusterNotFound(JSONResponseError): + pass + + +class UnknownSnapshotCopyRegion(JSONResponseError): + pass + + +class InvalidClusterSubnetState(JSONResponseError): + pass + + +class ReservedNodeQuotaExceeded(JSONResponseError): + pass + + +class InvalidClusterState(JSONResponseError): + pass + + +class HsmClientCertificateQuotaExceeded(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class HsmClientCertificateNotFound(JSONResponseError): + pass + + +class SubscriptionEventIdNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationAlreadyExists(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceeded(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFound(JSONResponseError): + pass + + +class BucketNotFound(JSONResponseError): + pass + + +class InsufficientClusterCapacity(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class UnauthorizedOperation(JSONResponseError): + pass + + +class ClusterQuotaExceeded(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ClusterSnapshotNotFound(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class InvalidHsmClientCertificateState(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class ResizeNotFound(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceeded(JSONResponseError): + pass + + +class AccessToSnapshotDenied(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupState(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceeded(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceeded(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFound(JSONResponseError): + pass + + +class InvalidElasticIp(JSONResponseError): + pass + + +class InvalidClusterParameterGroupState(JSONResponseError): + pass + + +class InvalidHsmConfigurationState(JSONResponseError): + pass + + + +class ClusterAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationQuotaExceeded(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExists(JSONResponseError): + pass + + +class SubscriptionSeverityNotFound(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class ReservedNodeAlreadyExists(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class ClusterParameterGroupNotFound(JSONResponseError): + pass + + +class InvalidS3BucketName(JSONResponseError): + pass + + +class InvalidS3KeyPrefix(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class HsmConfigurationNotFound(JSONResponseError): + pass + + +class 
AuthorizationNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSnapshotState(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class SnapshotCopyDisabled(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class ReservedNodeNotFound(JSONResponseError): + pass + + +class HsmClientCertificateAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupState(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class InsufficientS3BucketPolicy(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExists(JSONResponseError): + pass + + +class UnsupportedOption(JSONResponseError): + pass + + +class CopyToRegionDisabled(JSONResponseError): + pass + + +class SnapshotCopyAlreadyEnabled(JSONResponseError): + pass + + +class IncompatibleOrderableOptions(JSONResponseError): + pass + + +class InvalidSubscriptionState(JSONResponseError): + pass diff --git a/ext/boto/redshift/layer1.py b/ext/boto/redshift/layer1.py new file mode 100644 index 0000000000..be1529fdf8 --- /dev/null +++ b/ext/boto/redshift/layer1.py @@ -0,0 +1,3097 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.redshift import exceptions + + +class RedshiftConnection(AWSQueryConnection): + """ + Amazon Redshift **Overview** + This is an interface reference for Amazon Redshift. It contains + documentation for one of the programming or command line + interfaces you can use to manage Amazon Redshift clusters. Note + that Amazon Redshift is asynchronous, which means that some + interfaces may require techniques, such as polling or asynchronous + callback handlers, to determine when a command has been applied. + In this reference, the parameter descriptions indicate whether a + change is applied immediately, on the next instance reboot, or + during the next maintenance window. 
+    For a summary of the Amazon
+    Redshift cluster management interfaces, go to `Using the Amazon
+    Redshift Management Interfaces`_.
+
+    Amazon Redshift manages all the work of setting up, operating, and
+    scaling a data warehouse: provisioning capacity, monitoring and
+    backing up the cluster, and applying patches and upgrades to the
+    Amazon Redshift engine. You can focus on using your data to
+    acquire new insights for your business and customers.
+
+    If you are a first-time user of Amazon Redshift, we recommend that
+    you begin by reading the `Amazon Redshift Getting Started
+    Guide`_.
+
+    If you are a database developer, the `Amazon Redshift Database
+    Developer Guide`_ explains how to design, build, query, and
+    maintain the databases that make up your data warehouse.
+    """
+    APIVersion = "2012-12-01"
+    DefaultRegionName = "us-east-1"
+    DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com"
+    ResponseError = JSONResponseError
+
+    _faults = {
+        "SnapshotCopyAlreadyDisabled": exceptions.SnapshotCopyAlreadyDisabled,
+        "ClusterNotFound": exceptions.ClusterNotFound,
+        "UnknownSnapshotCopyRegion": exceptions.UnknownSnapshotCopyRegion,
+        "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetState,
+        "InvalidSubnet": exceptions.InvalidSubnet,
+        "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceeded,
+        "InvalidClusterState": exceptions.InvalidClusterState,
+        "HsmClientCertificateQuotaExceeded": exceptions.HsmClientCertificateQuotaExceeded,
+        "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound,
+        "HsmClientCertificateNotFound": exceptions.HsmClientCertificateNotFound,
+        "SubscriptionEventIdNotFound": exceptions.SubscriptionEventIdNotFound,
+        "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExists,
+        "HsmConfigurationAlreadyExists": exceptions.HsmConfigurationAlreadyExists,
+        "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceeded,
+        "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFound,
+        "BucketNotFound": exceptions.BucketNotFound,
+        "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacity,
+        "InvalidRestore": exceptions.InvalidRestore,
+        "UnauthorizedOperation": exceptions.UnauthorizedOperation,
+        "ClusterQuotaExceeded": exceptions.ClusterQuotaExceeded,
+        "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState,
+        "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFound,
+        "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded,
+        "InvalidHsmClientCertificateState": exceptions.InvalidHsmClientCertificateState,
+        "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound,
+        "ResizeNotFound": exceptions.ResizeNotFound,
+        "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFound,
+        "SNSNoAuthorization": exceptions.SNSNoAuthorization,
+        "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceeded,
+        "AccessToSnapshotDenied": exceptions.AccessToSnapshotDenied,
+        "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupState,
+        "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceeded,
+        "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceeded,
+        "SNSInvalidTopic": exceptions.SNSInvalidTopic,
+        "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFound,
+        "InvalidElasticIp": exceptions.InvalidElasticIp,
+        "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupState,
+        "InvalidHsmConfigurationState": exceptions.InvalidHsmConfigurationState,
"ClusterAlreadyExists": exceptions.ClusterAlreadyExists, + "HsmConfigurationQuotaExceeded": exceptions.HsmConfigurationQuotaExceeded, + "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExists, + "SubscriptionSeverityNotFound": exceptions.SubscriptionSeverityNotFound, + "SourceNotFound": exceptions.SourceNotFound, + "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExists, + "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceeded, + "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFound, + "InvalidS3BucketName": exceptions.InvalidS3BucketName, + "InvalidS3KeyPrefix": exceptions.InvalidS3KeyPrefix, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "HsmConfigurationNotFound": exceptions.HsmConfigurationNotFound, + "InvalidSubscriptionState": exceptions.InvalidSubscriptionState, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceeded, + "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotState, + "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceeded, + "SnapshotCopyDisabled": exceptions.SnapshotCopyDisabled, + "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExists, + "ReservedNodeNotFound": exceptions.ReservedNodeNotFound, + "HsmClientCertificateAlreadyExists": exceptions.HsmClientCertificateAlreadyExists, + "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupState, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "InsufficientS3BucketPolicy": exceptions.InsufficientS3BucketPolicy, + "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExists, + "UnsupportedOption": exceptions.UnsupportedOption, + "CopyToRegionDisabled": exceptions.CopyToRegionDisabled, + "SnapshotCopyAlreadyEnabled": exceptions.SnapshotCopyAlreadyEnabled, + "IncompatibleOrderableOptions": exceptions.IncompatibleOrderableOptions, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(RedshiftConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def authorize_cluster_security_group_ingress(self, + cluster_security_group_name, + cidrip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Adds an inbound (ingress) rule to an Amazon Redshift security + group. Depending on whether the application accessing your + cluster is running on the Internet or an EC2 instance, you can + authorize inbound access to either a Classless Interdomain + Routing (CIDR) IP address range or an EC2 security group. You + can add as many as 20 ingress rules to an Amazon Redshift + security group. + + For an overview of CIDR blocks, see the Wikipedia article on + `Classless Inter-Domain Routing`_. + + You must also associate the security group with a cluster so + that clients running on these IP addresses or the EC2 instance + are authorized to connect to the cluster. 
+        For information
+        about managing security groups, go to `Working with Security
+        Groups`_ in the Amazon Redshift Management Guide.
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the security group to
+            which the ingress rule is added.
+
+        :type cidrip: string
+        :param cidrip: The IP range to be added to the Amazon Redshift security
+            group.
+
+        :type ec2_security_group_name: string
+        :param ec2_security_group_name: The EC2 security group to be added to
+            the Amazon Redshift security group.
+
+        :type ec2_security_group_owner_id: string
+        :param ec2_security_group_owner_id: The AWS account number of the owner
+            of the security group specified by the EC2SecurityGroupName
+            parameter. The AWS Access Key ID is not an acceptable value.
+            Example: `111122223333`
+
+        """
+        params = {
+            'ClusterSecurityGroupName': cluster_security_group_name,
+        }
+        if cidrip is not None:
+            params['CIDRIP'] = cidrip
+        if ec2_security_group_name is not None:
+            params['EC2SecurityGroupName'] = ec2_security_group_name
+        if ec2_security_group_owner_id is not None:
+            params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+        return self._make_request(
+            action='AuthorizeClusterSecurityGroupIngress',
+            verb='POST',
+            path='/', params=params)
+
+    def authorize_snapshot_access(self, snapshot_identifier,
+                                  account_with_restore_access,
+                                  snapshot_cluster_identifier=None):
+        """
+        Authorizes the specified AWS customer account to restore the
+        specified snapshot.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The identifier of the snapshot the account
+            is authorized to restore.
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The identifier of the cluster the
+            snapshot was created from. This parameter is required if your IAM
+            user has a policy containing a snapshot resource element that
+            specifies anything other than * for the cluster name.
+
+        :type account_with_restore_access: string
+        :param account_with_restore_access: The identifier of the AWS customer
+            account authorized to restore the specified snapshot.
+
+        """
+        params = {
+            'SnapshotIdentifier': snapshot_identifier,
+            'AccountWithRestoreAccess': account_with_restore_access,
+        }
+        if snapshot_cluster_identifier is not None:
+            params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+        return self._make_request(
+            action='AuthorizeSnapshotAccess',
+            verb='POST',
+            path='/', params=params)
+
+    def copy_cluster_snapshot(self, source_snapshot_identifier,
+                              target_snapshot_identifier,
+                              source_snapshot_cluster_identifier=None):
+        """
+        Copies the specified automated cluster snapshot to a new
+        manual cluster snapshot. The source must be an automated
+        snapshot and it must be in the available state.
+
+        When you delete a cluster, Amazon Redshift deletes any
+        automated snapshots of the cluster. Also, when the retention
+        period of the snapshot expires, Amazon Redshift automatically
+        deletes it. If you want to keep an automated snapshot for a
+        longer period, you can make a manual copy of the snapshot.
+        Manual snapshots are retained until you delete them.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide.
+
+        :type source_snapshot_identifier: string
+        :param source_snapshot_identifier:
+            The identifier for the source snapshot.
+
+            Constraints:
+
+
+            + Must be the identifier for a valid automated snapshot whose state is
+                  `available`.
+
+        :type source_snapshot_cluster_identifier: string
+        :param source_snapshot_cluster_identifier:
+            The identifier of the cluster the source snapshot was created from.
+            This parameter is required if your IAM user has a policy containing
+            a snapshot resource element that specifies anything other than *
+            for the cluster name.
+
+            Constraints:
+
+
+            + Must be the identifier for a valid cluster.
+
+        :type target_snapshot_identifier: string
+        :param target_snapshot_identifier:
+            The identifier given to the new manual snapshot.
+
+            Constraints:
+
+
+            + Cannot be null, empty, or blank.
+            + Must contain from 1 to 255 alphanumeric characters or hyphens.
+            + First character must be a letter.
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+            + Must be unique for the AWS account that is making the request.
+
+        """
+        params = {
+            'SourceSnapshotIdentifier': source_snapshot_identifier,
+            'TargetSnapshotIdentifier': target_snapshot_identifier,
+        }
+        if source_snapshot_cluster_identifier is not None:
+            params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier
+        return self._make_request(
+            action='CopyClusterSnapshot',
+            verb='POST',
+            path='/', params=params)
+
+    def create_cluster(self, cluster_identifier, node_type, master_username,
+                       master_user_password, db_name=None, cluster_type=None,
+                       cluster_security_groups=None,
+                       vpc_security_group_ids=None,
+                       cluster_subnet_group_name=None,
+                       availability_zone=None,
+                       preferred_maintenance_window=None,
+                       cluster_parameter_group_name=None,
+                       automated_snapshot_retention_period=None, port=None,
+                       cluster_version=None, allow_version_upgrade=None,
+                       number_of_nodes=None, publicly_accessible=None,
+                       encrypted=None,
+                       hsm_client_certificate_identifier=None,
+                       hsm_configuration_identifier=None, elastic_ip=None):
+        """
+        Creates a new cluster. To create the cluster in a virtual
+        private cloud (VPC), you must provide a cluster subnet group
+        name. If you don't provide a cluster subnet group name or the
+        cluster security group parameter, Amazon Redshift creates a
+        non-VPC cluster and associates the default cluster security
+        group with the cluster. For more information about managing
+        clusters, go to `Amazon Redshift Clusters`_ in the Amazon
+        Redshift Management Guide.
+
+        :type db_name: string
+        :param db_name:
+            The name of the first database to be created when the cluster is
+            created.
+
+            To create additional databases after the cluster is created, connect to
+            the cluster with a SQL client and use SQL commands to create a
+            database. For more information, go to `Create a Database`_ in the
+            Amazon Redshift Database Developer Guide.
+
+            Default: `dev`
+
+            Constraints:
+
+
+            + Must contain 1 to 64 alphanumeric characters.
+            + Must contain only lowercase letters.
+            + Cannot be a word that is reserved by the service. A list of reserved
+                  words can be found in `Reserved Words`_ in the Amazon Redshift
+                  Database Developer Guide.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: A unique identifier for the cluster. You use
+            this identifier to refer to the cluster for any subsequent cluster
+            operations such as deleting or modifying. The identifier also
+            appears in the Amazon Redshift console.
+            Constraints:
+
+
+            + Must contain from 1 to 63 alphanumeric characters or hyphens.
+            + Alphabetic characters must be lowercase.
+            + First character must be a letter.
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+            + Must be unique for all clusters within an AWS account.
+
+
+            Example: `myexamplecluster`
+
+        :type cluster_type: string
+        :param cluster_type: The type of the cluster. When cluster type is
+            specified as
+
+
+            + `single-node`, the **NumberOfNodes** parameter is not required.
+            + `multi-node`, the **NumberOfNodes** parameter is required.
+
+
+            Valid Values: `multi-node` | `single-node`
+
+            Default: `multi-node`
+
+        :type node_type: string
+        :param node_type: The node type to be provisioned for the cluster. For
+            information about node types, go to `Working with Clusters`_ in
+            the Amazon Redshift Management Guide.
+            Valid Values: `dw1.xlarge` | `dw1.8xlarge` | `dw2.large` |
+            `dw2.8xlarge`.
+
+        :type master_username: string
+        :param master_username:
+            The user name associated with the master user account for the cluster
+            that is being created.
+
+            Constraints:
+
+
+            + Must be 1 - 128 alphanumeric characters.
+            + First character must be a letter.
+            + Cannot be a reserved word. A list of reserved words can be found in
+                  `Reserved Words`_ in the Amazon Redshift Database Developer Guide.
+
+        :type master_user_password: string
+        :param master_user_password:
+            The password associated with the master user account for the cluster
+            that is being created.
+
+            Constraints:
+
+
+            + Must be between 8 and 64 characters in length.
+            + Must contain at least one uppercase letter.
+            + Must contain at least one lowercase letter.
+            + Must contain one number.
+            + Can be any printable ASCII character (ASCII code 33 to 126) except '
+                  (single quote), " (double quote), \, /, @, or space.
+
+        :type cluster_security_groups: list
+        :param cluster_security_groups: A list of security groups to be
+            associated with this cluster.
+            Default: The default cluster security group for Amazon Redshift.
+
+        :type vpc_security_group_ids: list
+        :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
+            security groups to be associated with the cluster.
+            Default: The default VPC security group is associated with the cluster.
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of a cluster subnet group to
+            be associated with this cluster.
+            If this parameter is not provided, the resulting cluster will be
+            deployed outside a virtual private cloud (VPC).
+
+        :type availability_zone: string
+        :param availability_zone: The EC2 Availability Zone (AZ) in which you
+            want Amazon Redshift to provision the cluster. For example, if you
+            have several EC2 instances running in a specific Availability Zone,
+            then you might want the cluster to be provisioned in the same zone
+            in order to decrease network latency.
+            Default: A random, system-chosen Availability Zone in the region that
+            is specified by the endpoint.
+
+            Example: `us-east-1d`
+
+            Constraint: The specified Availability Zone must be in the same region
+            as the current endpoint.
+
+        :type preferred_maintenance_window: string
+        :param preferred_maintenance_window: The weekly time range (in UTC)
+            during which automated cluster maintenance can occur.
+            Format: `ddd:hh24:mi-ddd:hh24:mi`
+
+            Default: A 30-minute window selected at random from an 8-hour block of
+            time per region, occurring on a random day of the week. The
+            following list shows the time blocks for each region from which the
+            default maintenance windows are assigned.
+ + + + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC + + **US-West (Oregon) Region** 06:00-14:00 UTC + + **EU (Ireland) Region** 22:00-06:00 UTC + + **Asia Pacific (Singapore) Region** 14:00-22:00 UTC + + **Asia Pacific (Sydney) Region** 12:00-20:00 UTC + + **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC + + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Minimum 30-minute window. + + :type cluster_parameter_group_name: string + :param cluster_parameter_group_name: + The name of the parameter group to be associated with this cluster. + + Default: The default Amazon Redshift cluster parameter group. For + information about the default parameter group, go to `Working with + Amazon Redshift Parameter Groups`_ + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + Default: `1` + + Constraints: Must be a value from 0 to 35. + + :type port: integer + :param port: The port number on which the cluster accepts incoming + connections. + The cluster is accessible only via the JDBC and ODBC connection + strings. Part of the connection string requires the port on which + the cluster will listen for incoming connections. + + Default: `5439` + + Valid Values: `1150-65535` + + :type cluster_version: string + :param cluster_version: The version of the Amazon Redshift engine + software that you want to deploy on the cluster. + The version selected runs on all the nodes in the cluster. + + Constraints: Only version 1.0 is currently available. + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades can be applied during + the maintenance window to the Amazon Redshift engine that is + running on the cluster. + When a new version of the Amazon Redshift engine is released, you can + request that the service automatically apply upgrades during the + maintenance window to the Amazon Redshift engine that is running on + your cluster. + + Default: `True` + + :type number_of_nodes: integer + :param number_of_nodes: The number of compute nodes in the cluster. + This parameter is required when the **ClusterType** parameter is + specified as `multi-node`. + For information about determining how many nodes you need, go to ` + Working with Clusters`_ in the Amazon Redshift Management Guide . + + If you don't specify this parameter, you get a single-node cluster. + When requesting a multi-node cluster, you must specify the number + of nodes that you want in the cluster. + + Default: `1` + + Constraints: Value must be at least 1 and no more than 100. + + :type publicly_accessible: boolean + :param publicly_accessible: If `True`, the cluster can be accessed from + a public network. + + :type encrypted: boolean + :param encrypted: If `True`, the data in the cluster is encrypted at + rest. + Default: false + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. 
+ + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP (EIP) address for the cluster. + Constraints: The cluster must be provisioned in EC2-VPC and publicly- + accessible through an Internet gateway. For more information about + provisioning clusters in EC2-VPC, go to `Supported Platforms to + Launch Your Cluster`_ in the Amazon Redshift Management Guide. + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'NodeType': node_type, + 'MasterUsername': master_username, + 'MasterUserPassword': master_user_password, + } + if db_name is not None: + params['DBName'] = db_name + if cluster_type is not None: + params['ClusterType'] = cluster_type + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if availability_zone is not None: + params['AvailabilityZone'] = availability_zone + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if port is not None: + params['Port'] = port + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if publicly_accessible is not None: + params['PubliclyAccessible'] = str( + publicly_accessible).lower() + if encrypted is not None: + params['Encrypted'] = str( + encrypted).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if elastic_ip is not None: + params['ElasticIp'] = elastic_ip + return self._make_request( + action='CreateCluster', + verb='POST', + path='/', params=params) + + def create_cluster_parameter_group(self, parameter_group_name, + parameter_group_family, description): + """ + Creates an Amazon Redshift parameter group. + + Creating parameter groups is independent of creating clusters. + You can associate a cluster with a parameter group when you + create the cluster. You can also associate an existing cluster + with a parameter group after the cluster is created by using + ModifyCluster. + + Parameters in the parameter group define specific behavior + that applies to the databases you create on the cluster. For + more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: + The name of the cluster parameter group. 
+ + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique within your AWS account. + + This value is stored as a lower-case string. + + :type parameter_group_family: string + :param parameter_group_family: The Amazon Redshift engine version to + which the cluster parameter group applies. The cluster engine + version determines the set of parameters. + To get a list of valid parameter group family names, you can call + DescribeClusterParameterGroups. By default, Amazon Redshift returns + a list of all the parameter groups that are owned by your AWS + account, including the default parameter groups for each Amazon + Redshift engine version. The parameter group family names + associated with the default parameter groups provide you the valid + values. For example, a valid family name is "redshift-1.0". + + :type description: string + :param description: A description of the parameter group. + + """ + params = { + 'ParameterGroupName': parameter_group_name, + 'ParameterGroupFamily': parameter_group_family, + 'Description': description, + } + return self._make_request( + action='CreateClusterParameterGroup', + verb='POST', + path='/', params=params) + + def create_cluster_security_group(self, cluster_security_group_name, + description): + """ + Creates a new Amazon Redshift security group. You use security + groups to control access to non-VPC clusters. + + For information about managing security groups, go to `Amazon + Redshift Cluster Security Groups`_ in the Amazon Redshift + Management Guide . + + :type cluster_security_group_name: string + :param cluster_security_group_name: The name for the security group. + Amazon Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all security groups that are created by your AWS + account. + + + Example: `examplesecuritygroup` + + :type description: string + :param description: A description for the security group. + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + 'Description': description, + } + return self._make_request( + action='CreateClusterSecurityGroup', + verb='POST', + path='/', params=params) + + def create_cluster_snapshot(self, snapshot_identifier, + cluster_identifier): + """ + Creates a manual snapshot of the specified cluster. The + cluster must be in the `available` state. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type snapshot_identifier: string + :param snapshot_identifier: A unique identifier for the snapshot that + you are requesting. This identifier must be unique for all + snapshots within the AWS account. + Constraints: + + + + Cannot be null, empty, or blank + + Must contain from 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + + Example: `my-snapshot-id` + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier for which you want a + snapshot. 
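+
+        Example: a minimal usage sketch (the connection setup is
+        illustrative and assumes boto can find AWS credentials in its
+        usual configuration sources; the identifiers are placeholders):
+
+        >>> from boto.redshift.layer1 import RedshiftConnection
+        >>> conn = RedshiftConnection()
+        >>> conn.create_cluster_snapshot(
+        ...     snapshot_identifier='my-snapshot-id',
+        ...     cluster_identifier='myexamplecluster')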
+ + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'ClusterIdentifier': cluster_identifier, + } + return self._make_request( + action='CreateClusterSnapshot', + verb='POST', + path='/', params=params) + + def create_cluster_subnet_group(self, cluster_subnet_group_name, + description, subnet_ids): + """ + Creates a new Amazon Redshift subnet group. You must provide a + list of one or more subnets in your existing Amazon Virtual + Private Cloud (Amazon VPC) when creating Amazon Redshift + subnet group. + + For information about subnet groups, go to `Amazon Redshift + Cluster Subnet Groups`_ in the Amazon Redshift Management + Guide . + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name for the subnet group. Amazon + Redshift stores the value as a lowercase string. + Constraints: + + + + Must contain no more than 255 alphanumeric characters or hyphens. + + Must not be "Default". + + Must be unique for all subnet groups that are created by your AWS + account. + + + Example: `examplesubnetgroup` + + :type description: string + :param description: A description for the subnet group. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + 'Description': description, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + return self._make_request( + action='CreateClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def create_event_subscription(self, subscription_name, sns_topic_arn, + source_type=None, source_ids=None, + event_categories=None, severity=None, + enabled=None): + """ + Creates an Amazon Redshift event notification subscription. + This action requires an ARN (Amazon Resource Name) of an + Amazon SNS topic created by either the Amazon Redshift + console, the Amazon SNS console, or the Amazon SNS API. To + obtain an ARN with Amazon SNS, you must create a topic in + Amazon SNS and subscribe to the topic. The ARN is displayed in + the SNS console. + + You can specify the source type, and lists of Amazon Redshift + source IDs, event categories, and event severities. + Notifications will be sent for all events you want that match + those criteria. For example, you can specify source type = + cluster, source ID = my-cluster-1 and mycluster2, event + categories = Availability, Backup, and severity = ERROR. The + subscription will only send notifications for those ERROR + events in the Availability and Backup categories for the + specified clusters. + + If you specify both the source type and source IDs, such as + source type = cluster and source identifier = my-cluster-1, + notifications will be sent for all the cluster events for my- + cluster-1. If you specify a source type but do not specify a + source identifier, you will receive notice of the events for + the objects of that type in your AWS account. If you do not + specify either the SourceType nor the SourceIdentifier, you + will be notified of events generated from all Amazon Redshift + sources belonging to your AWS account. You must specify a + source type if you specify a source ID. + + :type subscription_name: string + :param subscription_name: + The name of the event subscription to be created. + + Constraints: + + + + Cannot be null, empty, or blank. + + Must contain from 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. 
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        :type sns_topic_arn: string
+        :param sns_topic_arn: The Amazon Resource Name (ARN) of the Amazon SNS
+            topic used to transmit the event notifications. The ARN is created
+            by Amazon SNS when you create a topic and subscribe to it.
+
+        :type source_type: string
+        :param source_type: The type of source that will be generating the
+            events. For example, if you want to be notified of events generated
+            by a cluster, you would set this parameter to cluster. If this
+            value is not specified, events are returned for all Amazon Redshift
+            objects in your AWS account. You must specify a source type in
+            order to specify source IDs.
+            Valid values: cluster, cluster-parameter-group, cluster-security-group,
+            and cluster-snapshot.
+
+        :type source_ids: list
+        :param source_ids: A list of one or more identifiers of Amazon Redshift
+            source objects. All of the objects must be of the same type as was
+            specified in the source type parameter. The event subscription will
+            return only events generated by the specified objects. If not
+            specified, then events are returned for all objects within the
+            source type specified.
+            Example: my-cluster-1, my-cluster-2
+
+            Example: my-snapshot-20131010
+
+        :type event_categories: list
+        :param event_categories: Specifies the Amazon Redshift event categories
+            to be published by the event notification subscription.
+            Values: Configuration, Management, Monitoring, Security
+
+        :type severity: string
+        :param severity: Specifies the Amazon Redshift event severity to be
+            published by the event notification subscription.
+            Values: ERROR, INFO
+
+        :type enabled: boolean
+        :param enabled: A Boolean value; set to `True` to activate the
+            subscription, set to `False` to create the subscription but not
+            activate it.
+
+        """
+        params = {
+            'SubscriptionName': subscription_name,
+            'SnsTopicArn': sns_topic_arn,
+        }
+        if source_type is not None:
+            params['SourceType'] = source_type
+        if source_ids is not None:
+            self.build_list_params(params,
+                                   source_ids,
+                                   'SourceIds.member')
+        if event_categories is not None:
+            self.build_list_params(params,
+                                   event_categories,
+                                   'EventCategories.member')
+        if severity is not None:
+            params['Severity'] = severity
+        if enabled is not None:
+            params['Enabled'] = str(
+                enabled).lower()
+        return self._make_request(
+            action='CreateEventSubscription',
+            verb='POST',
+            path='/', params=params)
+
+    def create_hsm_client_certificate(self,
+                                      hsm_client_certificate_identifier):
+        """
+        Creates an HSM client certificate that an Amazon Redshift
+        cluster will use to connect to the client's HSM in order to
+        store and retrieve the keys used to encrypt the cluster
+        databases.
+
+        The command returns a public key, which you must store in the
+        HSM. In addition to creating the HSM certificate, you must
+        create an Amazon Redshift HSM configuration that provides a
+        cluster the information needed to store and use encryption
+        keys in the HSM. For more information, go to `Hardware
+        Security Modules`_ in the Amazon Redshift Management Guide.
+
+        :type hsm_client_certificate_identifier: string
+        :param hsm_client_certificate_identifier: The identifier to be assigned
+            to the new HSM client certificate that the cluster will use to
+            connect to the HSM to use the database encryption keys.
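+
+        Example: an illustrative sketch; `examplehsmcert` is a
+        placeholder identifier, and (as noted above) the returned public
+        key must then be stored in the HSM:
+
+        >>> conn = RedshiftConnection()
+        >>> conn.create_hsm_client_certificate(
+        ...     hsm_client_certificate_identifier='examplehsmcert')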
+ + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='CreateHsmClientCertificate', + verb='POST', + path='/', params=params) + + def create_hsm_configuration(self, hsm_configuration_identifier, + description, hsm_ip_address, + hsm_partition_name, hsm_partition_password, + hsm_server_public_certificate): + """ + Creates an HSM configuration that contains the information + required by an Amazon Redshift cluster to store and use + database encryption keys in a Hardware Security Module (HSM). + After creating the HSM configuration, you can specify it as a + parameter when creating a cluster. The cluster will then store + its encryption keys in the HSM. + + In addition to creating an HSM configuration, you must also + create an HSM client certificate. For more information, go to + `Hardware Security Modules`_ in the Amazon Redshift Management + Guide. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier to be assigned to + the new Amazon Redshift HSM configuration. + + :type description: string + :param description: A text description of the HSM configuration to be + created. + + :type hsm_ip_address: string + :param hsm_ip_address: The IP address that the Amazon Redshift cluster + must use to access the HSM. + + :type hsm_partition_name: string + :param hsm_partition_name: The name of the partition in the HSM where + the Amazon Redshift clusters will store their database encryption + keys. + + :type hsm_partition_password: string + :param hsm_partition_password: The password required to access the HSM + partition. + + :type hsm_server_public_certificate: string + :param hsm_server_public_certificate: The HSMs public certificate file. + When using Cloud HSM, the file name is server.pem. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + 'Description': description, + 'HsmIpAddress': hsm_ip_address, + 'HsmPartitionName': hsm_partition_name, + 'HsmPartitionPassword': hsm_partition_password, + 'HsmServerPublicCertificate': hsm_server_public_certificate, + } + return self._make_request( + action='CreateHsmConfiguration', + verb='POST', + path='/', params=params) + + def delete_cluster(self, cluster_identifier, + skip_final_cluster_snapshot=None, + final_cluster_snapshot_identifier=None): + """ + Deletes a previously provisioned cluster. A successful + response from the web service indicates that the request was + received correctly. If a final cluster snapshot is requested + the status of the cluster will be "final-snapshot" while the + snapshot is being taken, then it's "deleting" once Amazon + Redshift begins deleting the cluster. Use DescribeClusters to + monitor the status of the deletion. The delete operation + cannot be canceled or reverted once submitted. For more + information about managing clusters, go to `Amazon Redshift + Clusters`_ in the Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: + The identifier of the cluster to be deleted. + + Constraints: + + + + Must contain lowercase characters. + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type skip_final_cluster_snapshot: boolean + :param skip_final_cluster_snapshot: Determines whether a final snapshot + of the cluster is created before Amazon Redshift deletes the + cluster. 
+            If `True`, a final cluster snapshot is not created. If
+            `False`, a final cluster snapshot is created before the cluster is
+            deleted.
+            Default: `False`
+
+        :type final_cluster_snapshot_identifier: string
+        :param final_cluster_snapshot_identifier:
+            The identifier of the final snapshot that is to be created immediately
+            before deleting the cluster. If this parameter is provided,
+            SkipFinalClusterSnapshot must be `False`.
+
+            Constraints:
+
+
+            + Must be 1 to 255 alphanumeric characters.
+            + First character must be a letter.
+            + Cannot end with a hyphen or contain two consecutive hyphens.
+
+        """
+        params = {'ClusterIdentifier': cluster_identifier, }
+        if skip_final_cluster_snapshot is not None:
+            params['SkipFinalClusterSnapshot'] = str(
+                skip_final_cluster_snapshot).lower()
+        if final_cluster_snapshot_identifier is not None:
+            params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier
+        return self._make_request(
+            action='DeleteCluster',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_parameter_group(self, parameter_group_name):
+        """
+        Deletes a specified Amazon Redshift parameter group.
+
+        :type parameter_group_name: string
+        :param parameter_group_name:
+            The name of the parameter group to be deleted.
+
+            Constraints:
+
+
+            + Must be the name of an existing cluster parameter group.
+            + Cannot delete a default cluster parameter group.
+
+        """
+        params = {'ParameterGroupName': parameter_group_name, }
+        return self._make_request(
+            action='DeleteClusterParameterGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_security_group(self, cluster_security_group_name):
+        """
+        Deletes an Amazon Redshift security group.
+
+        For information about managing security groups, go to `Amazon
+        Redshift Cluster Security Groups`_ in the Amazon Redshift
+        Management Guide.
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the cluster security
+            group to be deleted.
+
+        """
+        params = {
+            'ClusterSecurityGroupName': cluster_security_group_name,
+        }
+        return self._make_request(
+            action='DeleteClusterSecurityGroup',
+            verb='POST',
+            path='/', params=params)
+
+    def delete_cluster_snapshot(self, snapshot_identifier,
+                                snapshot_cluster_identifier=None):
+        """
+        Deletes the specified manual snapshot. The snapshot must be in
+        the `available` state, with no other users authorized to
+        access the snapshot.
+
+        Unlike automated snapshots, manual snapshots are retained even
+        after you delete your cluster. Amazon Redshift does not delete
+        your manual snapshots. You must delete manual snapshots
+        explicitly to avoid getting charged. If other accounts are
+        authorized to access the snapshot, you must revoke all of the
+        authorizations before you can delete the snapshot.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The unique identifier of the manual
+            snapshot to be deleted.
+            Constraints: Must be the name of an existing snapshot that is in the
+            `available` state.
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The unique identifier of the
+            cluster the snapshot was created from. This parameter is required
+            if your IAM user has a policy containing a snapshot resource
+            element that specifies anything other than * for the cluster name.
+            Constraints: Must be the name of a valid cluster.
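+
+        Example: a minimal usage sketch with a placeholder snapshot
+        identifier (assumes a `RedshiftConnection` instance as in the
+        examples above):
+
+        >>> conn.delete_cluster_snapshot(
+        ...     snapshot_identifier='my-snapshot-id')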
+ + """ + params = {'SnapshotIdentifier': snapshot_identifier, } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='DeleteClusterSnapshot', + verb='POST', + path='/', params=params) + + def delete_cluster_subnet_group(self, cluster_subnet_group_name): + """ + Deletes the specified cluster subnet group. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the cluster subnet group + name to be deleted. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + return self._make_request( + action='DeleteClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def delete_event_subscription(self, subscription_name): + """ + Deletes an Amazon Redshift event notification subscription. + + :type subscription_name: string + :param subscription_name: The name of the Amazon Redshift event + notification subscription to be deleted. + + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_hsm_client_certificate(self, + hsm_client_certificate_identifier): + """ + Deletes the specified HSM client certificate. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of the HSM + client certificate to be deleted. + + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='DeleteHsmClientCertificate', + verb='POST', + path='/', params=params) + + def delete_hsm_configuration(self, hsm_configuration_identifier): + """ + Deletes the specified Amazon Redshift HSM configuration. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of the Amazon + Redshift HSM configuration to be deleted. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + } + return self._make_request( + action='DeleteHsmConfiguration', + verb='POST', + path='/', params=params) + + def describe_cluster_parameter_groups(self, parameter_group_name=None, + max_records=None, marker=None): + """ + Returns a list of Amazon Redshift parameter groups, including + parameter groups you created and the default parameter group. + For each parameter group, the response includes the parameter + group name, description, and parameter group family name. You + can optionally specify a name to retrieve the description of a + specific parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of a specific parameter group for + which to return details. By default, details about all parameter + groups and the default parameter group are returned. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. 
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterParameterGroups request exceed the value specified
+            in `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if parameter_group_name is not None:
+            params['ParameterGroupName'] = parameter_group_name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterParameterGroups',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_parameters(self, parameter_group_name, source=None,
+                                    max_records=None, marker=None):
+        """
+        Returns a detailed list of parameters contained within the
+        specified Amazon Redshift parameter group. For each parameter
+        the response includes information such as parameter name,
+        description, data type, value, whether the parameter value is
+        modifiable, and so on.
+
+        You can specify a source filter to retrieve parameters of only a
+        specific type. For example, to retrieve parameters that were
+        modified by a user action such as from
+        ModifyClusterParameterGroup, you can specify source equal to
+        `user`.
+
+        For more information about managing parameter groups, go to
+        `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+        Management Guide.
+
+        :type parameter_group_name: string
+        :param parameter_group_name: The name of a cluster parameter group for
+            which to return details.
+
+        :type source: string
+        :param source: The parameter types to return. Specify `user` to show
+            parameters that are different from the default. Similarly, specify
+            `engine-default` to show parameters that are the same as the
+            default parameter group.
+            Default: All parameter types returned.
+
+            Valid Values: `user` | `engine-default`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterParameters request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {'ParameterGroupName': parameter_group_name, }
+        if source is not None:
+            params['Source'] = source
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterParameters',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_security_groups(self,
+                                         cluster_security_group_name=None,
+                                         max_records=None, marker=None):
+        """
+        Returns information about Amazon Redshift security groups.
+        If the name of a security group is specified, the response will
+        contain information about only that security group.
+
+        For information about managing security groups, go to `Amazon
+        Redshift Cluster Security Groups`_ in the Amazon Redshift
+        Management Guide.
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of a cluster security
+            group for which you are requesting details. You can specify either
+            the **Marker** parameter or a **ClusterSecurityGroupName**
+            parameter, but not both.
+            Example: `securitygroup1`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+            Default: `100`
+
+            Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSecurityGroups request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+            Constraints: You can specify either the **ClusterSecurityGroupName**
+            parameter or the **Marker** parameter, but not both.
+
+        """
+        params = {}
+        if cluster_security_group_name is not None:
+            params['ClusterSecurityGroupName'] = cluster_security_group_name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeClusterSecurityGroups',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_snapshots(self, cluster_identifier=None,
+                                   snapshot_identifier=None,
+                                   snapshot_type=None, start_time=None,
+                                   end_time=None, max_records=None,
+                                   marker=None, owner_account=None):
+        """
+        Returns one or more snapshot objects, which contain metadata
+        about your cluster snapshots. By default, this operation
+        returns information about all snapshots of all clusters that
+        are owned by your AWS customer account. No information is
+        returned for snapshots owned by inactive AWS customer
+        accounts.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster for which
+            information about snapshots is requested.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The snapshot identifier of the snapshot
+            about which to return information.
+
+        :type snapshot_type: string
+        :param snapshot_type: The type of snapshots for which you are
+            requesting information. By default, snapshots of all types are
+            returned.
+            Valid Values: `automated` | `manual`
+
+        :type start_time: timestamp
+        :param start_time: A value that requests only snapshots created at or
+            after the specified time. The time value is specified in ISO 8601
+            format. For more information about ISO 8601, go to the `ISO8601
+            Wikipedia page.`_
+            Example: `2012-07-16T18:00:00Z`
+
+        :type end_time: timestamp
+        :param end_time: A time value that requests only snapshots created at
+            or before the specified time. The time value is specified in ISO
+            8601 format.
+            `ISO8601 Wikipedia page.`_
+        Example: `2012-07-16T18:00:00Z`
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSnapshots request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        :type owner_account: string
+        :param owner_account: The AWS customer account used to create or copy
+            the snapshot. Use this field to filter the results to snapshots
+            owned by a particular account. To describe snapshots you own,
+            either specify your AWS customer account, or do not specify the
+            parameter.
+
+        """
+        params = {}
+        if cluster_identifier is not None:
+            params['ClusterIdentifier'] = cluster_identifier
+        if snapshot_identifier is not None:
+            params['SnapshotIdentifier'] = snapshot_identifier
+        if snapshot_type is not None:
+            params['SnapshotType'] = snapshot_type
+        if start_time is not None:
+            params['StartTime'] = start_time
+        if end_time is not None:
+            params['EndTime'] = end_time
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        if owner_account is not None:
+            params['OwnerAccount'] = owner_account
+        return self._make_request(
+            action='DescribeClusterSnapshots',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None,
+                                       max_records=None, marker=None):
+        """
+        Returns one or more cluster subnet group objects, which
+        contain metadata about your cluster subnet groups. By default,
+        this operation returns information about all cluster subnet
+        groups that are defined in your AWS account.
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of the cluster subnet group
+            for which information is requested.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeClusterSubnetGroups request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
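All of the Describe* calls in this file page through results with the same MaxRecords/Marker handshake described in the docstrings above. A minimal pagination sketch, assuming a boto 2.x connection obtained via `boto.redshift.connect_to_region` and the usual `<Action>Response` / `<Action>Result` nesting of the parsed JSON bodies (neither detail is visible in this hunk, so treat both as assumptions to verify):

    import boto.redshift

    conn = boto.redshift.connect_to_region('us-east-1')

    subnet_groups = []
    marker = None
    while True:
        # Each page holds at most MaxRecords records (20-100, default 100).
        page = conn.describe_cluster_subnet_groups(max_records=100,
                                                   marker=marker)
        # The key nesting below is assumed, not shown in this diff.
        result = page['DescribeClusterSubnetGroupsResponse'][
            'DescribeClusterSubnetGroupsResult']
        subnet_groups.extend(result['ClusterSubnetGroups'])
        marker = result.get('Marker')
        if not marker:  # no Marker means the last page has been reached
            break

The same loop shape works for any paginated call in this module; only the method name and the result keys change.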
+ + """ + params = {} + if cluster_subnet_group_name is not None: + params['ClusterSubnetGroupName'] = cluster_subnet_group_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterSubnetGroups', + verb='POST', + path='/', params=params) + + def describe_cluster_versions(self, cluster_version=None, + cluster_parameter_group_family=None, + max_records=None, marker=None): + """ + Returns descriptions of the available Amazon Redshift cluster + versions. You can call this operation even before creating any + clusters to learn more about the Amazon Redshift versions. For + more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_version: string + :param cluster_version: The specific cluster version to return. + Example: `1.0` + + :type cluster_parameter_group_family: string + :param cluster_parameter_group_family: + The name of a specific cluster parameter group family to return details + for. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeClusterVersions request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if cluster_parameter_group_family is not None: + params['ClusterParameterGroupFamily'] = cluster_parameter_group_family + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusterVersions', + verb='POST', + path='/', params=params) + + def describe_clusters(self, cluster_identifier=None, max_records=None, + marker=None): + """ + Returns properties of provisioned clusters including general + cluster properties, cluster database properties, maintenance + and backup properties, and security and access properties. + This operation supports pagination. For more information about + managing clusters, go to `Amazon Redshift Clusters`_ in the + Amazon Redshift Management Guide . + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of a cluster whose + properties you are requesting. This parameter is case sensitive. + The default is that all clusters defined for an account are returned. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. 
You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeClusters request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + Constraints: You can specify either the **ClusterIdentifier** parameter + or the **Marker** parameter, but not both. + + """ + params = {} + if cluster_identifier is not None: + params['ClusterIdentifier'] = cluster_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeClusters', + verb='POST', + path='/', params=params) + + def describe_default_cluster_parameters(self, parameter_group_family, + max_records=None, marker=None): + """ + Returns a list of parameter settings for the specified + parameter group family. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_family: string + :param parameter_group_family: The name of the cluster parameter group + family. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeDefaultClusterParameters request exceed the value specified + in `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {'ParameterGroupFamily': parameter_group_family, } + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeDefaultClusterParameters', + verb='POST', + path='/', params=params) + + def describe_event_categories(self, source_type=None): + """ + Displays a list of event categories for all event source + types, or for a specified source type. For a list of the event + categories and source types, go to `Amazon Redshift Event + Notifications`_. + + :type source_type: string + :param source_type: The source type, such as cluster or parameter + group, to which the described event categories apply. + Valid values: cluster, snapshot, parameter group, and security group. 
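As a quick illustration of describe_event_categories above, reusing the assumed `conn` object from the earlier pagination sketch (the `EventCategoriesMapList` key is an assumption about the response shape, not something shown in this hunk):

    # List only the event categories that apply to cluster sources.
    page = conn.describe_event_categories(source_type='cluster')
    result = page['DescribeEventCategoriesResponse'][
        'DescribeEventCategoriesResult']
    for mapping in result['EventCategoriesMapList']:
        print(mapping)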
+
+        """
+        params = {}
+        if source_type is not None:
+            params['SourceType'] = source_type
+        return self._make_request(
+            action='DescribeEventCategories',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_event_subscriptions(self, subscription_name=None,
+                                     max_records=None, marker=None):
+        """
+        Lists descriptions of all the Amazon Redshift event
+        notification subscriptions for a customer account. If you
+        specify a subscription name, lists the description for that
+        subscription.
+
+        :type subscription_name: string
+        :param subscription_name: The name of the Amazon Redshift event
+            notification subscription to be described.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeEventSubscriptions request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if subscription_name is not None:
+            params['SubscriptionName'] = subscription_name
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeEventSubscriptions',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_events(self, source_identifier=None, source_type=None,
+                        start_time=None, end_time=None, duration=None,
+                        max_records=None, marker=None):
+        """
+        Returns events related to clusters, security groups,
+        snapshots, and parameter groups for the past 14 days. Events
+        specific to a particular cluster, security group, snapshot or
+        parameter group can be obtained by providing the name as a
+        parameter. By default, the past hour of events is returned.
+
+        :type source_identifier: string
+        :param source_identifier:
+        The identifier of the event source for which events will be returned.
+            If this parameter is not specified, then all sources are included
+            in the response.
+
+        Constraints:
+
+        If SourceIdentifier is supplied, SourceType must also be provided.
+
+
+        + Specify a cluster identifier when SourceType is `cluster`.
+        + Specify a cluster security group name when SourceType is `cluster-
+              security-group`.
+        + Specify a cluster parameter group name when SourceType is `cluster-
+              parameter-group`.
+        + Specify a cluster snapshot identifier when SourceType is `cluster-
+              snapshot`.
+
+        :type source_type: string
+        :param source_type:
+        The event source to retrieve events for. If no value is specified, all
+            events are returned.
+
+        Constraints:
+
+        If SourceType is supplied, SourceIdentifier must also be provided.
+
+
+        + Specify `cluster` when SourceIdentifier is a cluster identifier.
+        + Specify `cluster-security-group` when SourceIdentifier is a cluster
+              security group name.
+        + Specify `cluster-parameter-group` when SourceIdentifier is a cluster
+              parameter group name.
+        + Specify `cluster-snapshot` when SourceIdentifier is a cluster
+              snapshot identifier.
+ + :type start_time: timestamp + :param start_time: The beginning of the time interval to retrieve + events for, specified in ISO 8601 format. For more information + about ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: `2009-07-08T18:00Z` + + :type end_time: timestamp + :param end_time: The end of the time interval for which to retrieve + events, specified in ISO 8601 format. For more information about + ISO 8601, go to the `ISO8601 Wikipedia page.`_ + Example: `2009-07-08T18:00Z` + + :type duration: integer + :param duration: The number of minutes prior to the time of the request + for which to retrieve events. For example, if the request is sent + at 18:00 and you specify a duration of 60, then only events which + have occurred after 17:00 will be returned. + Default: `60` + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeEvents request exceed the value specified in `MaxRecords`, + AWS returns a value in the `Marker` field of the response. You can + retrieve the next set of response records by providing the returned + marker value in the `Marker` parameter and retrying the request. + + """ + params = {} + if source_identifier is not None: + params['SourceIdentifier'] = source_identifier + if source_type is not None: + params['SourceType'] = source_type + if start_time is not None: + params['StartTime'] = start_time + if end_time is not None: + params['EndTime'] = end_time + if duration is not None: + params['Duration'] = duration + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEvents', + verb='POST', + path='/', params=params) + + def describe_hsm_client_certificates(self, + hsm_client_certificate_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified HSM client + certificate. If no certificate ID is specified, returns + information about all the HSM certificates owned by your AWS + customer account. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of a specific + HSM client certificate for which you want information. If no + identifier is specified, information is returned for all HSM client + certificates owned by your AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. 
When the results of a + DescribeHsmClientCertificates request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmClientCertificates', + verb='POST', + path='/', params=params) + + def describe_hsm_configurations(self, hsm_configuration_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified Amazon Redshift HSM + configuration. If no configuration ID is specified, returns + information about all the HSM configurations owned by your AWS + customer account. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of a specific + Amazon Redshift HSM configuration to be described. If no identifier + is specified, information is returned for all HSM configurations + owned by your AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of response records to return in + each call. If the number of remaining response records exceeds the + specified `MaxRecords` value, a value is returned in a `marker` + field of the response. You can retrieve the next set of records by + retrying the command with the returned marker value. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional parameter that specifies the starting point + to return a set of response records. When the results of a + DescribeHsmConfigurations request exceed the value specified in + `MaxRecords`, AWS returns a value in the `Marker` field of the + response. You can retrieve the next set of response records by + providing the returned marker value in the `Marker` parameter and + retrying the request. + + """ + params = {} + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmConfigurations', + verb='POST', + path='/', params=params) + + def describe_logging_status(self, cluster_identifier): + """ + Describes whether information, such as queries and connection + attempts, is being logged for the specified Amazon Redshift + cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster to get the + logging status from. + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DescribeLoggingStatus', + verb='POST', + path='/', params=params) + + def describe_orderable_cluster_options(self, cluster_version=None, + node_type=None, max_records=None, + marker=None): + """ + Returns a list of orderable cluster options. Before you create + a new cluster you can use this operation to find what options + are available, such as the EC2 Availability Zones (AZ) in the + specific AWS region that you can specify, and the node types + you can request. The node types differ by available storage, + memory, CPU and price. 
Given the cost involved, you might want
+        to obtain a list of cluster options in a specific region and
+        specify values when creating a cluster. For more information
+        about managing clusters, go to `Amazon Redshift Clusters`_ in
+        the Amazon Redshift Management Guide
+
+        :type cluster_version: string
+        :param cluster_version: The version filter value. Specify this
+            parameter to show only the available offerings matching the
+            specified version.
+        Default: All versions.
+
+        Constraints: Must be one of the versions returned from
+            DescribeClusterVersions.
+
+        :type node_type: string
+        :param node_type: The node type filter value. Specify this parameter to
+            show only the available offerings matching the specified node type.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeOrderableClusterOptions request exceed the value specified
+            in `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if cluster_version is not None:
+            params['ClusterVersion'] = cluster_version
+        if node_type is not None:
+            params['NodeType'] = node_type
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeOrderableClusterOptions',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_reserved_node_offerings(self,
+                                         reserved_node_offering_id=None,
+                                         max_records=None, marker=None):
+        """
+        Returns a list of the available reserved node offerings by
+        Amazon Redshift with their descriptions, including the node
+        type, the fixed and recurring costs of reserving the node, and
+        the duration the node will be reserved for you. These
+        descriptions help you determine which reserved node offering
+        you want to purchase. You then use the unique offering ID in
+        your call to PurchaseReservedNodeOffering to reserve one or
+        more nodes for your Amazon Redshift cluster.
+
+        For more information about reserved nodes, go to
+        `Purchasing Reserved Nodes`_ in the Amazon Redshift Management
+        Guide .
+
+        :type reserved_node_offering_id: string
+        :param reserved_node_offering_id: The unique identifier for the
+            offering.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeReservedNodeOfferings request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if reserved_node_offering_id is not None:
+            params['ReservedNodeOfferingId'] = reserved_node_offering_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeReservedNodeOfferings',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_reserved_nodes(self, reserved_node_id=None,
+                                max_records=None, marker=None):
+        """
+        Returns the descriptions of the reserved nodes.
+
+        :type reserved_node_id: string
+        :param reserved_node_id: Identifier for the node reservation.
+
+        :type max_records: integer
+        :param max_records: The maximum number of response records to return in
+            each call. If the number of remaining response records exceeds the
+            specified `MaxRecords` value, a value is returned in a `marker`
+            field of the response. You can retrieve the next set of records by
+            retrying the command with the returned marker value.
+        Default: `100`
+
+        Constraints: minimum 20, maximum 100.
+
+        :type marker: string
+        :param marker: An optional parameter that specifies the starting point
+            to return a set of response records. When the results of a
+            DescribeReservedNodes request exceed the value specified in
+            `MaxRecords`, AWS returns a value in the `Marker` field of the
+            response. You can retrieve the next set of response records by
+            providing the returned marker value in the `Marker` parameter and
+            retrying the request.
+
+        """
+        params = {}
+        if reserved_node_id is not None:
+            params['ReservedNodeId'] = reserved_node_id
+        if max_records is not None:
+            params['MaxRecords'] = max_records
+        if marker is not None:
+            params['Marker'] = marker
+        return self._make_request(
+            action='DescribeReservedNodes',
+            verb='POST',
+            path='/', params=params)
+
+    def describe_resize(self, cluster_identifier):
+        """
+        Returns information about the last resize operation for the
+        specified cluster. If no resize operation has ever been
+        initiated for the specified cluster, an `HTTP 404` error is
+        returned. If a resize operation was initiated and completed,
+        the status of the resize remains as `SUCCEEDED` until the next
+        resize.
+
+        A resize operation can be requested using ModifyCluster and
+        specifying a different number or type of nodes for the
+        cluster.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The unique identifier of a cluster whose
+            resize progress you are requesting. This parameter isn't case-
+            sensitive.
+        By default, resize operations for all clusters defined for an AWS
+            account are returned.
+
+        """
+        params = {'ClusterIdentifier': cluster_identifier, }
+        return self._make_request(
+            action='DescribeResize',
+            verb='POST',
+            path='/', params=params)
+
+    def disable_logging(self, cluster_identifier):
+        """
+        Stops logging information, such as queries and connection
+        attempts, for the specified Amazon Redshift cluster.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster on which
+            logging is to be stopped.
+ Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableLogging', + verb='POST', + path='/', params=params) + + def disable_snapshot_copy(self, cluster_identifier): + """ + Disables the automatic copying of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + that you want to disable copying of snapshots to a destination + region. + Constraints: Must be the valid name of an existing cluster that has + cross-region snapshot copy enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableSnapshotCopy', + verb='POST', + path='/', params=params) + + def enable_logging(self, cluster_identifier, bucket_name, + s3_key_prefix=None): + """ + Starts logging information, such as queries and connection + attempts, for the specified Amazon Redshift cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster on which + logging is to be started. + Example: `examplecluster` + + :type bucket_name: string + :param bucket_name: + The name of an existing S3 bucket where the log files are to be stored. + + Constraints: + + + + Must be in the same region as the cluster + + The cluster must have read bucket and put object permissions + + :type s3_key_prefix: string + :param s3_key_prefix: + The prefix applied to the log file names. + + Constraints: + + + + Cannot exceed 512 characters + + Cannot contain spaces( ), double quotes ("), single quotes ('), a + backslash (\), or control characters. The hexadecimal codes for + invalid characters are: + + + x00 to x20 + + x22 + + x27 + + x5c + + x7f or larger + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'BucketName': bucket_name, + } + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + return self._make_request( + action='EnableLogging', + verb='POST', + path='/', params=params) + + def enable_snapshot_copy(self, cluster_identifier, destination_region, + retention_period=None): + """ + Enables the automatic copy of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + to copy snapshots from. + Constraints: Must be the valid name of an existing cluster that does + not already have cross-region snapshot copy enabled. + + :type destination_region: string + :param destination_region: The destination region that you want to copy + snapshots to. + Constraints: Must be the name of a valid region. For more information, + see `Regions and Endpoints`_ in the Amazon Web Services General + Reference. + + :type retention_period: integer + :param retention_period: The number of days to retain automated + snapshots in the destination region after they are copied from the + source region. + Default: 7. + + Constraints: Must be at least 1 and no more than 35. 
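A short usage sketch for cross-region snapshot copy, again with the assumed `conn` object and hypothetical identifiers; `modify_snapshot_copy_retention_period` is defined further down in this same file:

    # Start copying automated snapshots to us-west-2, keeping them 10 days.
    conn.enable_snapshot_copy('examplecluster',
                              destination_region='us-west-2',
                              retention_period=10)

    # Shrinking the window later deletes destination-region snapshots that
    # fall outside the new retention period (see the docstrings).
    conn.modify_snapshot_copy_retention_period('examplecluster', 7)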
+
+        """
+        params = {
+            'ClusterIdentifier': cluster_identifier,
+            'DestinationRegion': destination_region,
+        }
+        if retention_period is not None:
+            params['RetentionPeriod'] = retention_period
+        return self._make_request(
+            action='EnableSnapshotCopy',
+            verb='POST',
+            path='/', params=params)
+
+    def modify_cluster(self, cluster_identifier, cluster_type=None,
+                       node_type=None, number_of_nodes=None,
+                       cluster_security_groups=None,
+                       vpc_security_group_ids=None,
+                       master_user_password=None,
+                       cluster_parameter_group_name=None,
+                       automated_snapshot_retention_period=None,
+                       preferred_maintenance_window=None,
+                       cluster_version=None, allow_version_upgrade=None,
+                       hsm_client_certificate_identifier=None,
+                       hsm_configuration_identifier=None,
+                       new_cluster_identifier=None):
+        """
+        Modifies the settings for a cluster. For example, you can add
+        another security or parameter group, update the preferred
+        maintenance window, or change the master user password.
+        Resetting a cluster password or modifying the security groups
+        associated with a cluster does not require a reboot. However,
+        modifying a parameter group requires a reboot for parameters
+        to take effect. For more information about managing clusters,
+        go to `Amazon Redshift Clusters`_ in the Amazon Redshift
+        Management Guide
+
+        You can also change node type and the number of nodes to scale
+        up or down the cluster. When resizing a cluster, you must
+        specify both the number of nodes and the node type even if one
+        of the parameters does not change. If you specify the same
+        number of nodes and node type that are already configured for
+        the cluster, an error is returned.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The unique identifier of the cluster to be
+            modified.
+        Example: `examplecluster`
+
+        :type cluster_type: string
+        :param cluster_type: The new cluster type.
+        When you submit your cluster resize request, your existing cluster goes
+            into a read-only mode. After Amazon Redshift provisions a new
+            cluster based on your resize requirements, there will be an outage
+            for a period while the old cluster is deleted and your connection
+            is switched to the new cluster. You can use DescribeResize to track
+            the progress of the resize request.
+
+        Valid Values: ` multi-node | single-node `
+
+        :type node_type: string
+        :param node_type: The new node type of the cluster. If you specify a
+            new node type, you must also specify the number of nodes parameter.
+        When you submit your request to resize a cluster, Amazon Redshift sets
+            access permissions for the cluster to read-only. After Amazon
+            Redshift provisions a new cluster according to your resize
+            requirements, there will be a temporary outage while the old
+            cluster is deleted and your connection is switched to the new
+            cluster. When the new connection is complete, the original access
+            permissions for the cluster are restored. You can use
+            DescribeResize to track the progress of the resize request.
+
+        Valid Values: ` dw1.xlarge` | `dw1.8xlarge` | `dw2.large` |
+            `dw2.8xlarge`.
+
+        :type number_of_nodes: integer
+        :param number_of_nodes: The new number of nodes of the cluster. If you
+            specify a new number of nodes, you must also specify the node type
+            parameter.
+        When you submit your request to resize a cluster, Amazon Redshift sets
+            access permissions for the cluster to read-only.
After Amazon + Redshift provisions a new cluster according to your resize + requirements, there will be a temporary outage while the old + cluster is deleted and your connection is switched to the new + cluster. When the new connection is complete, the original access + permissions for the cluster are restored. You can use + DescribeResize to track the progress of the resize request. + + Valid Values: Integer greater than `0`. + + :type cluster_security_groups: list + :param cluster_security_groups: + A list of cluster security groups to be authorized on this cluster. + This change is asynchronously applied as soon as possible. + + Security groups currently associated with the cluster, and not in the + list of groups to apply, will be revoked from the cluster. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens + + First character must be a letter + + Cannot end with a hyphen or contain two consecutive hyphens + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of virtual private cloud (VPC) + security groups to be associated with the cluster. + + :type master_user_password: string + :param master_user_password: + The new password for the cluster master user. This change is + asynchronously applied as soon as possible. Between the time of the + request and the completion of the request, the `MasterUserPassword` + element exists in the `PendingModifiedValues` element of the + operation response. + + Default: Uses existing setting. + + Constraints: + + + + Must be between 8 and 64 characters in length. + + Must contain at least one uppercase letter. + + Must contain at least one lowercase letter. + + Must contain one number. + + Can be any printable ASCII character (ASCII code 33 to 126) except ' + (single quote), " (double quote), \, /, @, or space. + + :type cluster_parameter_group_name: string + :param cluster_parameter_group_name: The name of the cluster parameter + group to apply to this cluster. This change is applied only after + the cluster is rebooted. To reboot a cluster use RebootCluster. + Default: Uses existing setting. + + Constraints: The cluster parameter group must be in the same parameter + group family that matches the cluster version. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + If you decrease the automated snapshot retention period from its + current value, existing automated snapshots that fall outside of + the new retention period will be immediately deleted. + + Default: Uses existing setting. + + Constraints: Must be a value from 0 to 35. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which system maintenance can occur, if necessary. If system + maintenance is necessary during the window, it may result in an + outage. + This maintenance window change is made immediately. If the new + maintenance window indicates the current time, there must be at + least 120 minutes between the current time and end of the window in + order to ensure that pending changes are applied. + + Default: Uses existing setting. + + Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`. 
+ + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Must be at least 30 minutes. + + :type cluster_version: string + :param cluster_version: The new version number of the Amazon Redshift + engine to upgrade to. + For major version upgrades, if a non-default cluster parameter group is + currently in use, a new cluster parameter group in the cluster + parameter group family for the new version must be specified. The + new cluster parameter group can be the default for that cluster + parameter group family. For more information about managing + parameter groups, go to `Amazon Redshift Parameter Groups`_ in the + Amazon Redshift Management Guide . + + Example: `1.0` + + :type allow_version_upgrade: boolean + :param allow_version_upgrade: If `True`, upgrades will be applied + automatically to the cluster during the maintenance window. + Default: `False` + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type new_cluster_identifier: string + :param new_cluster_identifier: The new identifier for the cluster. + Constraints: + + + + Must contain from 1 to 63 alphanumeric characters or hyphens. + + Alphabetic characters must be lowercase. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + Must be unique for all clusters within an AWS account. + + + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + if cluster_type is not None: + params['ClusterType'] = cluster_type + if node_type is not None: + params['NodeType'] = node_type + if number_of_nodes is not None: + params['NumberOfNodes'] = number_of_nodes + if cluster_security_groups is not None: + self.build_list_params(params, + cluster_security_groups, + 'ClusterSecurityGroups.member') + if vpc_security_group_ids is not None: + self.build_list_params(params, + vpc_security_group_ids, + 'VpcSecurityGroupIds.member') + if master_user_password is not None: + params['MasterUserPassword'] = master_user_password + if cluster_parameter_group_name is not None: + params['ClusterParameterGroupName'] = cluster_parameter_group_name + if automated_snapshot_retention_period is not None: + params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period + if preferred_maintenance_window is not None: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if cluster_version is not None: + params['ClusterVersion'] = cluster_version + if allow_version_upgrade is not None: + params['AllowVersionUpgrade'] = str( + allow_version_upgrade).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if new_cluster_identifier is not None: + params['NewClusterIdentifier'] = new_cluster_identifier + return self._make_request( + action='ModifyCluster', + verb='POST', + path='/', params=params) + + def modify_cluster_parameter_group(self, parameter_group_name, + parameters): + """ + Modifies the parameters of a 
parameter group. + + For more information about managing parameter groups, go to + `Amazon Redshift Parameter Groups`_ in the Amazon Redshift + Management Guide . + + :type parameter_group_name: string + :param parameter_group_name: The name of the parameter group to be + modified. + + :type parameters: list + :param parameters: An array of parameters to be modified. A maximum of + 20 parameters can be modified in a single request. + For each parameter to be modified, you must supply at least the + parameter name and parameter value; other name-value pairs of the + parameter are optional. + + For the workload management (WLM) configuration, you must supply all + the name-value pairs in the wlm_json_configuration parameter. + + """ + params = {'ParameterGroupName': parameter_group_name, } + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion')) + return self._make_request( + action='ModifyClusterParameterGroup', + verb='POST', + path='/', params=params) + + def modify_cluster_subnet_group(self, cluster_subnet_group_name, + subnet_ids, description=None): + """ + Modifies a cluster subnet group to include the specified list + of VPC subnets. The operation replaces the existing list of + subnets with the new list of subnets. + + :type cluster_subnet_group_name: string + :param cluster_subnet_group_name: The name of the subnet group to be + modified. + + :type description: string + :param description: A text description of the subnet group to be + modified. + + :type subnet_ids: list + :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets + can be modified in a single request. + + """ + params = { + 'ClusterSubnetGroupName': cluster_subnet_group_name, + } + self.build_list_params(params, + subnet_ids, + 'SubnetIds.member') + if description is not None: + params['Description'] = description + return self._make_request( + action='ModifyClusterSubnetGroup', + verb='POST', + path='/', params=params) + + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + source_ids=None, event_categories=None, + severity=None, enabled=None): + """ + Modifies an existing Amazon Redshift event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the modified Amazon Redshift + event notification subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + to be used by the event notification subscription. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a cluster, you would set this parameter to cluster. If this + value is not specified, events are returned for all Amazon Redshift + objects in your AWS account. You must specify a source type in + order to specify source IDs. + Valid values: cluster, cluster-parameter-group, cluster-security-group, + and cluster-snapshot. + + :type source_ids: list + :param source_ids: A list of one or more identifiers of Amazon Redshift + source objects. All of the objects must be of the same type as was + specified in the source type parameter. The event subscription will + return only events generated by the specified objects. If not + specified, then events are returned for all objects within the + source type specified. 
+
+        Example: my-cluster-1, my-cluster-2
+
+        Example: my-snapshot-20131010
+
+        :type event_categories: list
+        :param event_categories: Specifies the Amazon Redshift event categories
+            to be published by the event notification subscription.
+        Values: Configuration, Management, Monitoring, Security
+
+        :type severity: string
+        :param severity: Specifies the Amazon Redshift event severity to be
+            published by the event notification subscription.
+        Values: ERROR, INFO
+
+        :type enabled: boolean
+        :param enabled: A Boolean value indicating if the subscription is
+            enabled. `True` indicates the subscription is enabled.
+
+        """
+        params = {'SubscriptionName': subscription_name, }
+        if sns_topic_arn is not None:
+            params['SnsTopicArn'] = sns_topic_arn
+        if source_type is not None:
+            params['SourceType'] = source_type
+        if source_ids is not None:
+            self.build_list_params(params,
+                                   source_ids,
+                                   'SourceIds.member')
+        if event_categories is not None:
+            self.build_list_params(params,
+                                   event_categories,
+                                   'EventCategories.member')
+        if severity is not None:
+            params['Severity'] = severity
+        if enabled is not None:
+            params['Enabled'] = str(
+                enabled).lower()
+        return self._make_request(
+            action='ModifyEventSubscription',
+            verb='POST',
+            path='/', params=params)
+
+    def modify_snapshot_copy_retention_period(self, cluster_identifier,
+                                              retention_period):
+        """
+        Modifies the number of days to retain automated snapshots in
+        the destination region after they are copied from the source
+        region.
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The unique identifier of the cluster for
+            which you want to change the retention period for automated
+            snapshots that are copied to a destination region.
+        Constraints: Must be the valid name of an existing cluster that has
+            cross-region snapshot copy enabled.
+
+        :type retention_period: integer
+        :param retention_period: The number of days to retain automated
+            snapshots in the destination region after they are copied from the
+            source region.
+        If you decrease the retention period for automated snapshots that are
+            copied to a destination region, Amazon Redshift will delete any
+            existing automated snapshots that were copied to the destination
+            region and that fall outside of the new retention period.
+
+        Constraints: Must be at least 1 and no more than 35.
+
+        """
+        params = {
+            'ClusterIdentifier': cluster_identifier,
+            'RetentionPeriod': retention_period,
+        }
+        return self._make_request(
+            action='ModifySnapshotCopyRetentionPeriod',
+            verb='POST',
+            path='/', params=params)
+
+    def purchase_reserved_node_offering(self, reserved_node_offering_id,
+                                        node_count=None):
+        """
+        Allows you to purchase reserved nodes. Amazon Redshift offers
+        a predefined set of reserved node offerings. You can purchase
+        one of the offerings. You can call the
+        DescribeReservedNodeOfferings API to obtain the available
+        reserved node offerings. You can call this API by providing a
+        specific reserved node offering and the number of nodes you
+        want to reserve.
+
+        For more information about reserved nodes, go to
+        `Purchasing Reserved Nodes`_ in the Amazon Redshift Management
+        Guide .
+
+        :type reserved_node_offering_id: string
+        :param reserved_node_offering_id: The unique identifier of the reserved
+            node offering you want to purchase.
+
+        :type node_count: integer
+        :param node_count: The number of reserved nodes you want to purchase.
+ Default: `1` + + """ + params = { + 'ReservedNodeOfferingId': reserved_node_offering_id, + } + if node_count is not None: + params['NodeCount'] = node_count + return self._make_request( + action='PurchaseReservedNodeOffering', + verb='POST', + path='/', params=params) + + def reboot_cluster(self, cluster_identifier): + """ + Reboots a cluster. This action is taken as soon as possible. + It results in a momentary outage to the cluster, during which + the cluster status is set to `rebooting`. A cluster event is + created when the reboot is completed. Any pending cluster + modifications (see ModifyCluster) are applied at this reboot. + For more information about managing clusters, go to `Amazon + Redshift Clusters`_ in the Amazon Redshift Management Guide + + :type cluster_identifier: string + :param cluster_identifier: The cluster identifier. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='RebootCluster', + verb='POST', + path='/', params=params) + + def reset_cluster_parameter_group(self, parameter_group_name, + reset_all_parameters=None, + parameters=None): + """ + Sets one or more parameters of the specified parameter group + to their default values and sets the source values of the + parameters to "engine-default". To reset the entire parameter + group specify the ResetAllParameters parameter. For parameter + changes to take effect you must reboot any associated + clusters. + + :type parameter_group_name: string + :param parameter_group_name: The name of the cluster parameter group to + be reset. + + :type reset_all_parameters: boolean + :param reset_all_parameters: If `True`, all parameters in the specified + parameter group will be reset to their default values. + Default: `True` + + :type parameters: list + :param parameters: An array of names of parameters to be reset. If + ResetAllParameters option is not used, then at least one parameter + name must be supplied. + Constraints: A maximum of 20 parameters can be reset in a single + request. + + """ + params = {'ParameterGroupName': parameter_group_name, } + if reset_all_parameters is not None: + params['ResetAllParameters'] = str( + reset_all_parameters).lower() + if parameters is not None: + self.build_complex_list_params( + params, parameters, + 'Parameters.member', + ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion')) + return self._make_request( + action='ResetClusterParameterGroup', + verb='POST', + path='/', params=params) + + def restore_from_cluster_snapshot(self, cluster_identifier, + snapshot_identifier, + snapshot_cluster_identifier=None, + port=None, availability_zone=None, + allow_version_upgrade=None, + cluster_subnet_group_name=None, + publicly_accessible=None, + owner_account=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None, + elastic_ip=None, + cluster_parameter_group_name=None, + cluster_security_groups=None, + vpc_security_group_ids=None, + preferred_maintenance_window=None, + automated_snapshot_retention_period=None): + """ + Creates a new cluster from a snapshot. Amazon Redshift creates + the resulting cluster with the same configuration as the + original cluster from which the snapshot was created, except + that the new cluster is created with the default cluster + security and parameter group. 
After Amazon Redshift creates
+        the cluster you can use the ModifyCluster API to associate a
+        different security group and different parameter group with
+        the restored cluster.
+
+        If you restore a cluster into a VPC, you must provide a
+        cluster subnet group where you want the cluster restored.
+
+        For more information about working with snapshots, go to
+        `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+        Guide .
+
+        :type cluster_identifier: string
+        :param cluster_identifier: The identifier of the cluster that will be
+            created from restoring the snapshot.
+
+        Constraints:
+
+
+        + Must contain from 1 to 63 alphanumeric characters or hyphens.
+        + Alphabetic characters must be lowercase.
+        + First character must be a letter.
+        + Cannot end with a hyphen or contain two consecutive hyphens.
+        + Must be unique for all clusters within an AWS account.
+
+        :type snapshot_identifier: string
+        :param snapshot_identifier: The name of the snapshot from which to
+            create the new cluster. This parameter isn't case sensitive.
+        Example: `my-snapshot-id`
+
+        :type snapshot_cluster_identifier: string
+        :param snapshot_cluster_identifier: The name of the cluster the source
+            snapshot was created from. This parameter is required if your IAM
+            user has a policy containing a snapshot resource element that
+            specifies anything other than * for the cluster name.
+
+        :type port: integer
+        :param port: The port number on which the cluster accepts connections.
+        Default: The same port as the original cluster.
+
+        Constraints: Must be between `1115` and `65535`.
+
+        :type availability_zone: string
+        :param availability_zone: The Amazon EC2 Availability Zone in which to
+            restore the cluster.
+        Default: A random, system-chosen Availability Zone.
+
+        Example: `us-east-1a`
+
+        :type allow_version_upgrade: boolean
+        :param allow_version_upgrade: If `True`, upgrades can be applied during
+            the maintenance window to the Amazon Redshift engine that is
+            running on the cluster.
+        Default: `True`
+
+        :type cluster_subnet_group_name: string
+        :param cluster_subnet_group_name: The name of the subnet group where
+            you want the cluster restored.
+        A snapshot of a cluster in a VPC can be restored only in a VPC.
+            Therefore, you must provide a subnet group name where you want the
+            cluster restored.
+
+        :type publicly_accessible: boolean
+        :param publicly_accessible: If `True`, the cluster can be accessed from
+            a public network.
+
+        :type owner_account: string
+        :param owner_account: The AWS customer account used to create or copy
+            the snapshot. Required if you are restoring a snapshot you do not
+            own, optional if you own the snapshot.
+
+        :type hsm_client_certificate_identifier: string
+        :param hsm_client_certificate_identifier: Specifies the name of the HSM
+            client certificate the Amazon Redshift cluster uses to retrieve the
+            data encryption keys stored in an HSM.
+
+        :type hsm_configuration_identifier: string
+        :param hsm_configuration_identifier: Specifies the name of the HSM
+            configuration that contains the information the Amazon Redshift
+            cluster can use to retrieve and store keys in an HSM.
+
+        :type elastic_ip: string
+        :param elastic_ip: The elastic IP (EIP) address for the cluster.
+
+        :type cluster_parameter_group_name: string
+        :param cluster_parameter_group_name:
+        The name of the parameter group to be associated with this cluster.
+
+        Default: The default Amazon Redshift cluster parameter group.
For + information about the default parameter group, go to `Working with + Amazon Redshift Parameter Groups`_. + + Constraints: + + + + Must be 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type cluster_security_groups: list + :param cluster_security_groups: A list of security groups to be + associated with this cluster. + Default: The default cluster security group for Amazon Redshift. + + Cluster security groups only apply to clusters outside of VPCs. + + :type vpc_security_group_ids: list + :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC) + security groups to be associated with the cluster. + Default: The default VPC security group is associated with the cluster. + + VPC security groups only apply to clusters in VPCs. + + :type preferred_maintenance_window: string + :param preferred_maintenance_window: The weekly time range (in UTC) + during which automated cluster maintenance can occur. + Format: `ddd:hh24:mi-ddd:hh24:mi` + + Default: The value selected for the cluster from which the snapshot was + taken. The following list shows the time blocks for each region + from which the default maintenance windows are assigned. + + + + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC + + **US-West (Oregon) Region** 06:00-14:00 UTC + + **EU (Ireland) Region** 22:00-06:00 UTC + + **Asia Pacific (Singapore) Region** 14:00-22:00 UTC + + **Asia Pacific (Sydney) Region** 12:00-20:00 UTC + + **Asia Pacific (Tokyo) Region** 17:00-03:00 UTC + + + Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun + + Constraints: Minimum 30-minute window. + + :type automated_snapshot_retention_period: integer + :param automated_snapshot_retention_period: The number of days that + automated snapshots are retained. If the value is 0, automated + snapshots are disabled. Even if automated snapshots are disabled, + you can still create manual snapshots when you want with + CreateClusterSnapshot. + Default: The value selected for the cluster from which the snapshot was + taken. + + Constraints: Must be a value from 0 to 35. 
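A usage sketch for the restore call above, with the assumed `conn` object and hypothetical names. Because the snapshot here is assumed to come from a cluster in a VPC, a cluster subnet group is supplied, as the docstring requires:

    conn.restore_from_cluster_snapshot(
        'restored-cluster',                  # identifier for the new cluster
        'my-snapshot-id',                    # snapshot to restore from
        cluster_subnet_group_name='my-subnet-group',
        publicly_accessible=False,
        automated_snapshot_retention_period=7)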
+
+        """
+        params = {
+            'ClusterIdentifier': cluster_identifier,
+            'SnapshotIdentifier': snapshot_identifier,
+        }
+        if snapshot_cluster_identifier is not None:
+            params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
+        if port is not None:
+            params['Port'] = port
+        if availability_zone is not None:
+            params['AvailabilityZone'] = availability_zone
+        if allow_version_upgrade is not None:
+            params['AllowVersionUpgrade'] = str(
+                allow_version_upgrade).lower()
+        if cluster_subnet_group_name is not None:
+            params['ClusterSubnetGroupName'] = cluster_subnet_group_name
+        if publicly_accessible is not None:
+            params['PubliclyAccessible'] = str(
+                publicly_accessible).lower()
+        if owner_account is not None:
+            params['OwnerAccount'] = owner_account
+        if hsm_client_certificate_identifier is not None:
+            params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier
+        if hsm_configuration_identifier is not None:
+            params['HsmConfigurationIdentifier'] = hsm_configuration_identifier
+        if elastic_ip is not None:
+            params['ElasticIp'] = elastic_ip
+        if cluster_parameter_group_name is not None:
+            params['ClusterParameterGroupName'] = cluster_parameter_group_name
+        if cluster_security_groups is not None:
+            self.build_list_params(params,
+                                   cluster_security_groups,
+                                   'ClusterSecurityGroups.member')
+        if vpc_security_group_ids is not None:
+            self.build_list_params(params,
+                                   vpc_security_group_ids,
+                                   'VpcSecurityGroupIds.member')
+        if preferred_maintenance_window is not None:
+            params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+        if automated_snapshot_retention_period is not None:
+            params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
+        return self._make_request(
+            action='RestoreFromClusterSnapshot',
+            verb='POST',
+            path='/', params=params)
+
+    def revoke_cluster_security_group_ingress(self,
+                                              cluster_security_group_name,
+                                              cidrip=None,
+                                              ec2_security_group_name=None,
+                                              ec2_security_group_owner_id=None):
+        """
+        Revokes an ingress rule in an Amazon Redshift security group
+        for a previously authorized IP range or Amazon EC2 security
+        group. To add an ingress rule, see
+        AuthorizeClusterSecurityGroupIngress. For information about
+        managing security groups, go to `Amazon Redshift Cluster
+        Security Groups`_ in the Amazon Redshift Management Guide.
+
+        :type cluster_security_group_name: string
+        :param cluster_security_group_name: The name of the security group from
+            which to revoke the ingress rule.
+
+        :type cidrip: string
+        :param cidrip: The IP range for which to revoke access. This range must
+            be a valid Classless Inter-Domain Routing (CIDR) block of IP
+            addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and
+            `EC2SecurityGroupOwnerId` cannot be provided.
+
+        :type ec2_security_group_name: string
+        :param ec2_security_group_name: The name of the EC2 Security Group
+            whose access is to be revoked. If `EC2SecurityGroupName` is
+            specified, `EC2SecurityGroupOwnerId` must also be provided and
+            `CIDRIP` cannot be provided.
+
+        :type ec2_security_group_owner_id: string
+        :param ec2_security_group_owner_id: The AWS account number of the owner
+            of the security group specified in the `EC2SecurityGroupName`
+            parameter. The AWS access key ID is not an acceptable value. If
+            `EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must
+            also be provided, and `CIDRIP` cannot be provided.
+ Example: `111122223333` + + """ + params = { + 'ClusterSecurityGroupName': cluster_security_group_name, + } + if cidrip is not None: + params['CIDRIP'] = cidrip + if ec2_security_group_name is not None: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id is not None: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + return self._make_request( + action='RevokeClusterSecurityGroupIngress', + verb='POST', + path='/', params=params) + + def revoke_snapshot_access(self, snapshot_identifier, + account_with_restore_access, + snapshot_cluster_identifier=None): + """ + Removes the ability of the specified AWS customer account to + restore the specified snapshot. If the account is currently + restoring the snapshot, the restore will run to completion. + + For more information about working with snapshots, go to + `Amazon Redshift Snapshots`_ in the Amazon Redshift Management + Guide . + + :type snapshot_identifier: string + :param snapshot_identifier: The identifier of the snapshot that the + account can no longer access. + + :type snapshot_cluster_identifier: string + :param snapshot_cluster_identifier: The identifier of the cluster the + snapshot was created from. This parameter is required if your IAM + user has a policy containing a snapshot resource element that + specifies anything other than * for the cluster name. + + :type account_with_restore_access: string + :param account_with_restore_access: The identifier of the AWS customer + account that can no longer restore the specified snapshot. + + """ + params = { + 'SnapshotIdentifier': snapshot_identifier, + 'AccountWithRestoreAccess': account_with_restore_access, + } + if snapshot_cluster_identifier is not None: + params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier + return self._make_request( + action='RevokeSnapshotAccess', + verb='POST', + path='/', params=params) + + def rotate_encryption_key(self, cluster_identifier): + """ + Rotates the encryption keys for a cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster that + you want to rotate the encryption keys for. + Constraints: Must be the name of valid cluster that has encryption + enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='RotateEncryptionKey', + verb='POST', + path='/', params=params) + + def _make_request(self, action, verb, path, params): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb='POST', + path='/', params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + json_body = json.loads(body) + fault_name = json_body.get('Error', {}).get('Code', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) diff --git a/ext/boto/regioninfo.py b/ext/boto/regioninfo.py new file mode 100644 index 0000000000..d840bc613d --- /dev/null +++ b/ext/boto/regioninfo.py @@ -0,0 +1,290 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import os
+
+import boto
+from boto.compat import json
+from boto.exception import BotoClientError
+from boto.endpoints import BotoEndpointResolver
+from boto.endpoints import StaticEndpointBuilder
+
+
+_endpoints_cache = {}
+
+
+def load_endpoint_json(path):
+    """
+    Loads a given JSON file & returns it.
+
+    :param path: The path to the JSON file
+    :type path: string
+
+    :returns: The loaded data
+    """
+    return _load_json_file(path)
+
+
+def _load_json_file(path):
+    """
+    Loads a given JSON file & returns it.
+
+    :param path: The path to the JSON file
+    :type path: string
+
+    :returns: The loaded data
+    """
+    with open(path, 'r') as endpoints_file:
+        return json.load(endpoints_file)
+
+
+def merge_endpoints(defaults, additions):
+    """
+    Given an existing set of endpoint data, this will deep-update it with
+    any similarly structured data in the additions.
+
+    :param defaults: The existing endpoints data
+    :type defaults: dict
+
+    :param additions: The additional endpoints data
+    :type additions: dict
+
+    :returns: The modified endpoints data
+    :rtype: dict
+    """
+    # We can't just do a ``defaults.update(...)`` here, as that could
+    # *overwrite* regions if present in both.
+    # We'll iterate instead, essentially doing a deeper merge.
+    for service, region_info in additions.items():
+        # Set the default, if not present, to an empty dict.
+        defaults.setdefault(service, {})
+        defaults[service].update(region_info)
+
+    return defaults
+
+
+def load_regions():
+    """
+    Actually load the region/endpoint information from the JSON files.
+
+    By default, this loads from the default included ``boto/endpoints.json``
+    file.
+
+    Users can override/extend this by supplying either a ``BOTO_ENDPOINTS``
+    environment variable or an ``endpoints_path`` config variable, either of
+    which should be an absolute path to the user's JSON file.
+
+    :returns: The endpoints data
+    :rtype: dict
+    """
+    # Load the defaults first.
+    endpoints = _load_builtin_endpoints()
+    additional_path = None
+
+    # Try the ENV var. If not, check the config file.
+    if os.environ.get('BOTO_ENDPOINTS'):
+        additional_path = os.environ['BOTO_ENDPOINTS']
+    elif boto.config.get('Boto', 'endpoints_path'):
+        additional_path = boto.config.get('Boto', 'endpoints_path')
+
+    # If there's a file provided, we'll load it & additively merge it into
+    # the endpoints.
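+    # For example (hypothetical values), BOTO_ENDPOINTS could point at a
+    # JSON file such as:
+    #     {"ec2": {"us-custom-1": "ec2.us-custom-1.example.com"}}
+    # whose entries are merged over the built-in endpoint data below.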
+ if additional_path: + additional = load_endpoint_json(additional_path) + endpoints = merge_endpoints(endpoints, additional) + + return endpoints + + +def _load_builtin_endpoints(_cache=_endpoints_cache): + """Loads the builtin endpoints in the legacy format.""" + # If there's a cached response, return it + if _cache: + return _cache + + # Load the endpoints file + endpoints = _load_json_file(boto.ENDPOINTS_PATH) + + # Build the endpoints into the legacy format + resolver = BotoEndpointResolver(endpoints) + builder = StaticEndpointBuilder(resolver) + endpoints = builder.build_static_endpoints() + + # Cache the endpoints and then return them + _cache.update(endpoints) + return _cache + + +def get_regions(service_name, region_cls=None, connection_cls=None): + """ + Given a service name (like ``ec2``), returns a list of ``RegionInfo`` + objects for that service. + + This leverages the ``endpoints.json`` file (+ optional user overrides) to + configure/construct all the objects. + + :param service_name: The name of the service to construct the ``RegionInfo`` + objects for. Ex: ``ec2``, ``s3``, ``sns``, etc. + :type service_name: string + + :param region_cls: (Optional) The class to use when constructing. By + default, this is ``RegionInfo``. + :type region_cls: class + + :param connection_cls: (Optional) The connection class for the + ``RegionInfo`` object. Providing this allows the ``connect`` method on + the ``RegionInfo`` to work. Default is ``None`` (no connection). + :type connection_cls: class + + :returns: A list of configured ``RegionInfo`` objects + :rtype: list + """ + endpoints = load_regions() + + if service_name not in endpoints: + raise BotoClientError( + "Service '%s' not found in endpoints." % service_name + ) + + if region_cls is None: + region_cls = RegionInfo + + region_objs = [] + + for region_name, endpoint in endpoints.get(service_name, {}).items(): + region_objs.append( + region_cls( + name=region_name, + endpoint=endpoint, + connection_cls=connection_cls + ) + ) + + return region_objs + + +def connect(service_name, region_name, region_cls=None, + connection_cls=None, **kw_params): + """Create a connection class for a given service in a given region. + + :param service_name: The name of the service to construct the + ``RegionInfo`` object for, e.g. ``ec2``, ``s3``, etc. + :type service_name: str + + :param region_name: The name of the region to connect to, e.g. + ``us-west-2``, ``eu-central-1``, etc. + :type region_name: str + + :param region_cls: (Optional) The class to use when constructing. By + default, this is ``RegionInfo``. + :type region_cls: class + + :param connection_cls: (Optional) The connection class for the + ``RegionInfo`` object. Providing this allows the ``connect`` method on + the ``RegionInfo`` to work. Default is ``None`` (no connection). + :type connection_cls: class + + :returns: A configured connection class. 
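+
+    Example (a minimal sketch; the region name and connection class are
+    only placeholders)::
+
+        from boto.regioninfo import connect
+        conn = connect('ec2', 'us-west-2', connection_cls=EC2Connection)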
+    """
+    if region_cls is None:
+        region_cls = RegionInfo
+    region = _get_region(service_name, region_name, region_cls, connection_cls)
+
+    if region is None and _use_endpoint_heuristics():
+        region = _get_region_with_heuristics(
+            service_name, region_name, region_cls, connection_cls
+        )
+
+    if region is None:
+        return None
+
+    return region.connect(**kw_params)
+
+
+def _get_region(service_name, region_name, region_cls=None,
+                connection_cls=None):
+    """Finds the region by searching through the known regions."""
+    for region in get_regions(service_name, region_cls, connection_cls):
+        if region.name == region_name:
+            return region
+    return None
+
+
+def _get_region_with_heuristics(service_name, region_name, region_cls=None,
+                                connection_cls=None):
+    """Finds the region using known regions and heuristics."""
+    endpoints = load_endpoint_json(boto.ENDPOINTS_PATH)
+    resolver = BotoEndpointResolver(endpoints)
+    hostname = resolver.resolve_hostname(service_name, region_name)
+
+    return region_cls(
+        name=region_name,
+        endpoint=hostname,
+        connection_cls=connection_cls
+    )
+
+
+def _use_endpoint_heuristics():
+    env_var = os.environ.get('BOTO_USE_ENDPOINT_HEURISTICS', 'false').lower()
+    config_var = boto.config.getbool('Boto', 'use_endpoint_heuristics', False)
+    return env_var == 'true' or config_var
+
+
+class RegionInfo(object):
+    """
+    Represents an AWS Region
+    """
+
+    def __init__(self, connection=None, name=None, endpoint=None,
+                 connection_cls=None):
+        self.connection = connection
+        self.name = name
+        self.endpoint = endpoint
+        self.connection_cls = connection_cls
+
+    def __repr__(self):
+        return 'RegionInfo:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'regionName':
+            self.name = value
+        elif name == 'regionEndpoint':
+            self.endpoint = value
+        else:
+            setattr(self, name, value)
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(region=self, **kw_params)
diff --git a/ext/boto/requestlog.py b/ext/boto/requestlog.py
new file mode 100644
index 0000000000..d8009fe76f
--- /dev/null
+++ b/ext/boto/requestlog.py
@@ -0,0 +1,39 @@
+import sys
+from datetime import datetime
+from threading import Thread
+from six.moves import queue as Queue
+
+from boto.utils import RequestHook
+from boto.compat import long_type
+
+
+class RequestLogger(RequestHook):
+    """
+    This class implements a request logger that uses a single thread to
+    write to a log file.
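+
+    A hypothetical sketch of wiring it up (``set_request_hook`` is the
+    hook setter on ``boto.connection.AWSAuthConnection``)::
+
+        from boto.requestlog import RequestLogger
+        conn.set_request_hook(RequestLogger('/tmp/boto_requests.csv'))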
+    """
+    def __init__(self, filename='/tmp/request_log.csv'):
+        self.request_log_file = open(filename, 'w')
+        self.request_log_queue = Queue.Queue(100)
+        Thread(target=self._request_log_worker).start()
+
+    def handle_request_data(self, request, response, error=False):
+        size = 0 if error else response.getheader('Content-Length')
+        now = datetime.now()
+        time = now.strftime('%Y-%m-%d %H:%M:%S')
+        td = (now - request.start_time)
+        duration = (td.microseconds + long_type(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
+
+        # write output including timestamp, status code, response time, response size, request action
+        self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (time, response.status, duration, size, request.params['Action']))
+
+    def _request_log_worker(self):
+        while True:
+            try:
+                item = self.request_log_queue.get(True)
+                self.request_log_file.write(item)
+                self.request_log_file.flush()
+                self.request_log_queue.task_done()
+            except Exception:
+                import traceback
+                traceback.print_exc(file=sys.stdout)
diff --git a/ext/boto/resultset.py b/ext/boto/resultset.py
new file mode 100644
index 0000000000..189a47a3a2
--- /dev/null
+++ b/ext/boto/resultset.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
+
+class ResultSet(list):
+    """
+    The ResultSet is used to pass results back from the Amazon services
+    to the client. It is a light wrapper around Python's :py:class:`list`
+    class, with some additional methods for parsing XML results from AWS.
+    Because I don't really want any dependencies on external libraries,
+    I'm using the standard SAX parser that comes with Python. The good news is
+    that it's quite fast and efficient but it makes some things rather
+    difficult.
+
+    You can pass in, as the marker_elem parameter, a list of tuples.
+    Each tuple contains a string as the first element which represents
+    the XML element that the resultset needs to be on the lookout for
+    and a Python class as the second element of the tuple. Each time the
+    specified element is found in the XML, a new instance of the class
+    will be created and popped onto the stack.
+
+    :ivar str next_token: A hash used to assist in paging through very long
+        result sets. In most cases, passing this value to certain methods
+        will give you another 'page' of results.
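+
+    For example, this is how the S3 service sets up bucket parsing (an
+    illustrative sketch of the pattern)::
+
+        rs = ResultSet([('Bucket', Bucket)])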
+    """
+    def __init__(self, marker_elem=None):
+        list.__init__(self)
+        if isinstance(marker_elem, list):
+            self.markers = marker_elem
+        else:
+            self.markers = []
+        self.marker = None
+        self.key_marker = None
+        self.next_marker = None  # avail when delimiter used
+        self.next_key_marker = None
+        self.next_upload_id_marker = None
+        self.next_version_id_marker = None
+        self.next_generation_marker = None
+        self.version_id_marker = None
+        self.is_truncated = False
+        self.next_token = None
+        self.status = True
+
+    def startElement(self, name, attrs, connection):
+        for t in self.markers:
+            if name == t[0]:
+                obj = t[1](connection)
+                self.append(obj)
+                return obj
+        if name == 'Owner':
+            # Makes owner available for get_service and
+            # perhaps other lists where not handled by
+            # another element.
+            self.owner = User()
+            return self.owner
+        return None
+
+    def to_boolean(self, value, true_value='true'):
+        if value == true_value:
+            return True
+        else:
+            return False
+
+    def endElement(self, name, value, connection):
+        if name == 'IsTruncated':
+            self.is_truncated = self.to_boolean(value)
+        elif name == 'Marker':
+            self.marker = value
+        elif name == 'KeyMarker':
+            self.key_marker = value
+        elif name == 'NextMarker':
+            self.next_marker = value
+        elif name == 'NextKeyMarker':
+            self.next_key_marker = value
+        elif name == 'VersionIdMarker':
+            self.version_id_marker = value
+        elif name == 'NextVersionIdMarker':
+            self.next_version_id_marker = value
+        elif name == 'NextGenerationMarker':
+            self.next_generation_marker = value
+        elif name == 'UploadIdMarker':
+            self.upload_id_marker = value
+        elif name == 'NextUploadIdMarker':
+            self.next_upload_id_marker = value
+        elif name == 'Bucket':
+            self.bucket = value
+        elif name == 'MaxUploads':
+            self.max_uploads = int(value)
+        elif name == 'MaxItems':
+            self.max_items = int(value)
+        elif name == 'Prefix':
+            self.prefix = value
+        elif name == 'return':
+            self.status = self.to_boolean(value)
+        elif name == 'StatusCode':
+            self.status = self.to_boolean(value, 'Success')
+        elif name == 'ItemName':
+            self.append(value)
+        elif name == 'NextToken':
+            self.next_token = value
+        elif name == 'nextToken':
+            self.next_token = value
+            # Code exists which expects nextToken to be available, so we
+            # set it here to remain backwards-compatible.
+            self.nextToken = value
+        elif name == 'BoxUsage':
+            try:
+                connection.box_usage += float(value)
+            except Exception:
+                pass
+        elif name == 'IsValid':
+            self.status = self.to_boolean(value, 'True')
+        else:
+            setattr(self, name, value)
+
+
+class BooleanResult(object):
+
+    def __init__(self, marker_elem=None):
+        self.status = True
+        self.request_id = None
+        self.box_usage = None
+
+    def __repr__(self):
+        if self.status:
+            return 'True'
+        else:
+            return 'False'
+
+    def __nonzero__(self):
+        return self.status
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def to_boolean(self, value, true_value='true'):
+        if value == true_value:
+            return True
+        else:
+            return False
+
+    def endElement(self, name, value, connection):
+        if name == 'return':
+            self.status = self.to_boolean(value)
+        elif name == 'StatusCode':
+            self.status = self.to_boolean(value, 'Success')
+        elif name == 'IsValid':
+            self.status = self.to_boolean(value, 'True')
+        elif name == 'RequestId':
+            self.request_id = value
+        elif name == 'requestId':
+            self.request_id = value
+        elif name == 'BoxUsage':
+            self.box_usage = value
+        else:
+            setattr(self, name, value)
diff --git a/ext/boto/roboto/__init__.py b/ext/boto/roboto/__init__.py
new file mode 100644
index 0000000000..792d600548
--- /dev/null
+++ b/ext/boto/roboto/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/ext/boto/roboto/awsqueryrequest.py b/ext/boto/roboto/awsqueryrequest.py
new file mode 100644
index 0000000000..793adf90ec
--- /dev/null
+++ b/ext/boto/roboto/awsqueryrequest.py
@@ -0,0 +1,503 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
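+#
+# roboto is a small framework for writing command line tools against the
+# AWS Query APIs: an AWSQueryRequest subclass declares its Params, Args
+# and Filters, and this module maps those declarations onto request
+# parameters and optparse options.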
+import sys +import os +import boto +import optparse +import copy +import boto.exception +import boto.roboto.awsqueryservice + +import bdb +import traceback +try: + import epdb as debugger +except ImportError: + import pdb as debugger + +def boto_except_hook(debugger_flag, debug_flag): + def excepthook(typ, value, tb): + if typ is bdb.BdbQuit: + sys.exit(1) + sys.excepthook = sys.__excepthook__ + + if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty(): + if debugger.__name__ == 'epdb': + debugger.post_mortem(tb, typ, value) + else: + debugger.post_mortem(tb) + elif debug_flag: + print(traceback.print_tb(tb)) + sys.exit(1) + else: + print(value) + sys.exit(1) + + return excepthook + +class Line(object): + + def __init__(self, fmt, data, label): + self.fmt = fmt + self.data = data + self.label = label + self.line = '%s\t' % label + self.printed = False + + def append(self, datum): + self.line += '%s\t' % datum + + def print_it(self): + if not self.printed: + print(self.line) + self.printed = True + +class RequiredParamError(boto.exception.BotoClientError): + + def __init__(self, required): + self.required = required + s = 'Required parameters are missing: %s' % self.required + super(RequiredParamError, self).__init__(s) + +class EncoderError(boto.exception.BotoClientError): + + def __init__(self, error_msg): + s = 'Error encoding value (%s)' % error_msg + super(EncoderError, self).__init__(s) + +class FilterError(boto.exception.BotoClientError): + + def __init__(self, filters): + self.filters = filters + s = 'Unknown filters: %s' % self.filters + super(FilterError, self).__init__(s) + +class Encoder(object): + + @classmethod + def encode(cls, p, rp, v, label=None): + if p.name.startswith('_'): + return + try: + mthd = getattr(cls, 'encode_'+p.ptype) + mthd(p, rp, v, label) + except AttributeError: + raise EncoderError('Unknown type: %s' % p.ptype) + + @classmethod + def encode_string(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + encode_file = encode_string + encode_enum = encode_string + + @classmethod + def encode_integer(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = '%d' % v + + @classmethod + def encode_boolean(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + if v: + v = 'true' + else: + v = 'false' + rp[label] = v + + @classmethod + def encode_datetime(cls, p, rp, v, l): + if l: + label = l + else: + label = p.name + rp[label] = v + + @classmethod + def encode_array(cls, p, rp, v, l): + v = boto.utils.mklist(v) + if l: + label = l + else: + label = p.name + label = label + '.%d' + for i, value in enumerate(v): + rp[label%(i+1)] = value + +class AWSQueryRequest(object): + + ServiceClass = None + + Description = '' + Params = [] + Args = [] + Filters = [] + Response = {} + + CLITypeMap = {'string' : 'string', + 'integer' : 'int', + 'int' : 'int', + 'enum' : 'choice', + 'datetime' : 'string', + 'dateTime' : 'string', + 'file' : 'string', + 'boolean' : None} + + @classmethod + def name(cls): + return cls.__name__ + + def __init__(self, **args): + self.args = args + self.parser = None + self.cli_options = None + self.cli_args = None + self.cli_output_format = None + self.connection = None + self.list_markers = [] + self.item_markers = [] + self.request_params = {} + self.connection_args = None + + def __repr__(self): + return self.name() + + def get_connection(self, **args): + if self.connection is None: + self.connection = self.ServiceClass(**args) + return self.connection + + @property + def 
status(self): + retval = None + if self.http_response is not None: + retval = self.http_response.status + return retval + + @property + def reason(self): + retval = None + if self.http_response is not None: + retval = self.http_response.reason + return retval + + @property + def request_id(self): + retval = None + if self.aws_response is not None: + retval = getattr(self.aws_response, 'requestId') + return retval + + def process_filters(self): + filters = self.args.get('filters', []) + filter_names = [f['name'] for f in self.Filters] + unknown_filters = [f for f in filters if f not in filter_names] + if unknown_filters: + raise FilterError('Unknown filters: %s' % unknown_filters) + for i, filter in enumerate(self.Filters): + name = filter['name'] + if name in filters: + self.request_params['Filter.%d.Name' % (i+1)] = name + for j, value in enumerate(boto.utils.mklist(filters[name])): + Encoder.encode(filter, self.request_params, value, + 'Filter.%d.Value.%d' % (i+1, j+1)) + + def process_args(self, **args): + """ + Responsible for walking through Params defined for the request and: + + * Matching them with keyword parameters passed to the request + constructor or via the command line. + * Checking to see if all required parameters have been specified + and raising an exception, if not. + * Encoding each value into the set of request parameters that will + be sent in the request to the AWS service. + """ + self.args.update(args) + self.connection_args = copy.copy(self.args) + if 'debug' in self.args and self.args['debug'] >= 2: + boto.set_stream_logger(self.name()) + required = [p.name for p in self.Params+self.Args if not p.optional] + for param in self.Params+self.Args: + if param.long_name: + python_name = param.long_name.replace('-', '_') + else: + python_name = boto.utils.pythonize_name(param.name, '_') + value = None + if python_name in self.args: + value = self.args[python_name] + if value is None: + value = param.default + if value is not None: + if param.name in required: + required.remove(param.name) + if param.request_param: + if param.encoder: + param.encoder(param, self.request_params, value) + else: + Encoder.encode(param, self.request_params, value) + if python_name in self.args: + del self.connection_args[python_name] + if required: + l = [] + for p in self.Params+self.Args: + if p.name in required: + if p.short_name and p.long_name: + l.append('(%s, %s)' % (p.optparse_short_name, + p.optparse_long_name)) + elif p.short_name: + l.append('(%s)' % p.optparse_short_name) + else: + l.append('(%s)' % p.optparse_long_name) + raise RequiredParamError(','.join(l)) + boto.log.debug('request_params: %s' % self.request_params) + self.process_markers(self.Response) + + def process_markers(self, fmt, prev_name=None): + if fmt and fmt['type'] == 'object': + for prop in fmt['properties']: + self.process_markers(prop, fmt['name']) + elif fmt and fmt['type'] == 'array': + self.list_markers.append(prev_name) + self.item_markers.append(fmt['name']) + + def send(self, verb='GET', **args): + self.process_args(**args) + self.process_filters() + conn = self.get_connection(**self.connection_args) + self.http_response = conn.make_request(self.name(), + self.request_params, + verb=verb) + self.body = self.http_response.read() + boto.log.debug(self.body) + if self.http_response.status == 200: + self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers, + item_marker=self.item_markers) + h = boto.jsonresponse.XmlHandler(self.aws_response, self) + h.parse(self.body) + return 
self.aws_response
+        else:
+            boto.log.error('%s %s' % (self.http_response.status,
+                                      self.http_response.reason))
+            boto.log.error('%s' % self.body)
+            raise conn.ResponseError(self.http_response.status,
+                                     self.http_response.reason,
+                                     self.body)
+
+    def add_standard_options(self):
+        group = optparse.OptionGroup(self.parser, 'Standard Options')
+        # add standard options that all commands get
+        group.add_option('-D', '--debug', action='store_true',
+                         help='Turn on all debugging output')
+        group.add_option('--debugger', action='store_true',
+                         default=False,
+                         help='Enable interactive debugger on error')
+        group.add_option('-U', '--url', action='store',
+                         help='Override service URL with value provided')
+        group.add_option('--region', action='store',
+                         help='Name of the region to connect to')
+        group.add_option('-I', '--access-key-id', action='store',
+                         help='Override access key value')
+        group.add_option('-S', '--secret-key', action='store',
+                         help='Override secret key value')
+        group.add_option('--version', action='store_true',
+                         help='Display version string')
+        if self.Filters:
+            group.add_option('--help-filters', action='store_true',
+                             help='Display list of available filters')
+            group.add_option('--filter', action='append',
+                             metavar=' name=value',
+                             help='A filter for limiting the results')
+        self.parser.add_option_group(group)
+
+    def process_standard_options(self, options, args, d):
+        if hasattr(options, 'help_filters') and options.help_filters:
+            print('Available filters:')
+            for filter in self.Filters:
+                print('%s\t%s' % (filter.name, filter.doc))
+            sys.exit(0)
+        if options.debug:
+            self.args['debug'] = 2
+        if options.url:
+            self.args['url'] = options.url
+        if options.region:
+            self.args['region'] = options.region
+        if options.access_key_id:
+            self.args['aws_access_key_id'] = options.access_key_id
+        if options.secret_key:
+            self.args['aws_secret_access_key'] = options.secret_key
+        if options.version:
+            # TODO - Where should the version # come from?
+ print('version x.xx') + exit(0) + sys.excepthook = boto_except_hook(options.debugger, + options.debug) + + def get_usage(self): + s = 'usage: %prog [options] ' + l = [ a.long_name for a in self.Args ] + s += ' '.join(l) + for a in self.Args: + if a.doc: + s += '\n\n\t%s - %s' % (a.long_name, a.doc) + return s + + def build_cli_parser(self): + self.parser = optparse.OptionParser(description=self.Description, + usage=self.get_usage()) + self.add_standard_options() + for param in self.Params: + ptype = action = choices = None + if param.ptype in self.CLITypeMap: + ptype = self.CLITypeMap[param.ptype] + action = 'store' + if param.ptype == 'boolean': + action = 'store_true' + elif param.ptype == 'array': + if len(param.items) == 1: + ptype = param.items[0]['type'] + action = 'append' + elif param.cardinality != 1: + action = 'append' + if ptype or action == 'store_true': + if param.short_name: + self.parser.add_option(param.optparse_short_name, + param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + elif param.long_name: + self.parser.add_option(param.optparse_long_name, + action=action, type=ptype, + choices=param.choices, + help=param.doc) + + def do_cli(self): + if not self.parser: + self.build_cli_parser() + self.cli_options, self.cli_args = self.parser.parse_args() + d = {} + self.process_standard_options(self.cli_options, self.cli_args, d) + for param in self.Params: + if param.long_name: + p_name = param.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(param.name) + value = getattr(self.cli_options, p_name) + if param.ptype == 'file' and value: + if value == '-': + value = sys.stdin.read() + else: + path = os.path.expanduser(value) + path = os.path.expandvars(path) + if os.path.isfile(path): + fp = open(path) + value = fp.read() + fp.close() + else: + self.parser.error('Unable to read file: %s' % path) + d[p_name] = value + for arg in self.Args: + if arg.long_name: + p_name = arg.long_name.replace('-', '_') + else: + p_name = boto.utils.pythonize_name(arg.name) + value = None + if arg.cardinality == 1: + if len(self.cli_args) >= 1: + value = self.cli_args[0] + else: + value = self.cli_args + d[p_name] = value + self.args.update(d) + if hasattr(self.cli_options, 'filter') and self.cli_options.filter: + d = {} + for filter in self.cli_options.filter: + name, value = filter.split('=') + d[name] = value + if 'filters' in self.args: + self.args['filters'].update(d) + else: + self.args['filters'] = d + try: + response = self.main() + self.cli_formatter(response) + except RequiredParamError as e: + print(e) + sys.exit(1) + except self.ServiceClass.ResponseError as err: + print('Error(%s): %s' % (err.error_code, err.error_message)) + sys.exit(1) + except boto.roboto.awsqueryservice.NoCredentialsError as err: + print('Unable to find credentials.') + sys.exit(1) + except Exception as e: + print(e) + sys.exit(1) + + def _generic_cli_formatter(self, fmt, data, label=''): + if fmt['type'] == 'object': + for prop in fmt['properties']: + if 'name' in fmt: + if fmt['name'] in data: + data = data[fmt['name']] + if fmt['name'] in self.list_markers: + label = fmt['name'] + if label[-1] == 's': + label = label[0:-1] + label = label.upper() + self._generic_cli_formatter(prop, data, label) + elif fmt['type'] == 'array': + for item in data: + line = Line(fmt, item, label) + if isinstance(item, dict): + for field_name in item: + line.append(item[field_name]) + elif isinstance(item, basestring): + line.append(item) + line.print_it() + + def 
cli_formatter(self, data):
+        """
+        This method is responsible for formatting the output for the
+        command line interface. The default behavior is to call the
+        generic CLI formatter which attempts to print something
+        reasonable. If you want specific formatting, you should
+        override this method and do your own thing.
+
+        :type data: dict
+        :param data: The data returned by AWS.
+        """
+        if data:
+            self._generic_cli_formatter(self.Response, data)
+
+
diff --git a/ext/boto/roboto/awsqueryservice.py b/ext/boto/roboto/awsqueryservice.py
new file mode 100644
index 0000000000..9bf95ac2be
--- /dev/null
+++ b/ext/boto/roboto/awsqueryservice.py
@@ -0,0 +1,122 @@
+from __future__ import print_function
+import os
+import urlparse
+import boto
+import boto.connection
+import boto.jsonresponse
+import boto.exception
+from boto.roboto import awsqueryrequest
+
+class NoCredentialsError(boto.exception.BotoClientError):
+
+    def __init__(self):
+        s = 'Unable to find credentials'
+        super(NoCredentialsError, self).__init__(s)
+
+class AWSQueryService(boto.connection.AWSQueryConnection):
+
+    Name = ''
+    Description = ''
+    APIVersion = ''
+    Authentication = 'sign-v2'
+    Path = '/'
+    Port = 443
+    Provider = 'aws'
+    EnvURL = 'AWS_URL'
+
+    Regions = []
+
+    def __init__(self, **args):
+        self.args = args
+        self.check_for_credential_file()
+        self.check_for_env_url()
+        if 'host' not in self.args:
+            if self.Regions:
+                region_name = self.args.get('region_name',
+                                            self.Regions[0]['name'])
+                for region in self.Regions:
+                    if region['name'] == region_name:
+                        self.args['host'] = region['endpoint']
+        if 'path' not in self.args:
+            self.args['path'] = self.Path
+        if 'port' not in self.args:
+            self.args['port'] = self.Port
+        try:
+            super(AWSQueryService, self).__init__(**self.args)
+            self.aws_response = None
+        except boto.exception.NoAuthHandlerFound:
+            raise NoCredentialsError()
+
+    def check_for_credential_file(self):
+        """
+        Checks for the existence of an AWS credential file.
+        If the environment variable AWS_CREDENTIAL_FILE is
+        set and points to a file, that file will be read and
+        will be searched for credentials.
+        Note that if credentials have been explicitly passed
+        into the class constructor, those values always take
+        precedence.
+        """
+        if 'AWS_CREDENTIAL_FILE' in os.environ:
+            path = os.environ['AWS_CREDENTIAL_FILE']
+            path = os.path.expanduser(path)
+            path = os.path.expandvars(path)
+            if os.path.isfile(path):
+                fp = open(path)
+                lines = fp.readlines()
+                fp.close()
+                for line in lines:
+                    if line[0] != '#':
+                        if '=' in line:
+                            name, value = line.split('=', 1)
+                            if name.strip() == 'AWSAccessKeyId':
+                                if 'aws_access_key_id' not in self.args:
+                                    value = value.strip()
+                                    self.args['aws_access_key_id'] = value
+                            elif name.strip() == 'AWSSecretKey':
+                                if 'aws_secret_access_key' not in self.args:
+                                    value = value.strip()
+                                    self.args['aws_secret_access_key'] = value
+            else:
+                print('Warning: unable to read AWS_CREDENTIAL_FILE')
+
+    def check_for_env_url(self):
+        """
+        First checks to see if a url argument was explicitly passed
+        in. If so, that will be used. If not, it checks for the
+        existence of the environment variable specified in ENV_URL.
+        If this is set, it should contain a fully qualified URL to the
+        service you want to use.
+        Note that any values passed explicitly to the class constructor
+        will take precedence.
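+
+        For example (hypothetical), with the default ``EnvURL`` of
+        ``AWS_URL``, setting::
+
+            AWS_URL=https://ec2.example.com:8773/services/Eucalyptus
+
+        would populate is_secure, host, port and path from that URL.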
+ """ + url = self.args.get('url', None) + if url: + del self.args['url'] + if not url and self.EnvURL in os.environ: + url = os.environ[self.EnvURL] + if url: + rslt = urlparse.urlparse(url) + if 'is_secure' not in self.args: + if rslt.scheme == 'https': + self.args['is_secure'] = True + else: + self.args['is_secure'] = False + + host = rslt.netloc + port = None + l = host.split(':') + if len(l) > 1: + host = l[0] + port = int(l[1]) + if 'host' not in self.args: + self.args['host'] = host + if port and 'port' not in self.args: + self.args['port'] = port + + if rslt.path and 'path' not in self.args: + self.args['path'] = rslt.path + + def _required_auth_capability(self): + return [self.Authentication] + diff --git a/ext/boto/roboto/param.py b/ext/boto/roboto/param.py new file mode 100644 index 0000000000..35a25b4af5 --- /dev/null +++ b/ext/boto/roboto/param.py @@ -0,0 +1,147 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os + +class Converter(object): + + @classmethod + def convert_string(cls, param, value): + # TODO: could do length validation, etc. 
here
+        if not isinstance(value, basestring):
+            raise ValueError
+        return value
+
+    @classmethod
+    def convert_integer(cls, param, value):
+        # TODO: could do range checking here
+        return int(value)
+
+    @classmethod
+    def convert_boolean(cls, param, value):
+        """
+        For command line arguments, just the presence
+        of the option means True so just return True
+        """
+        return True
+
+    @classmethod
+    def convert_file(cls, param, value):
+        if os.path.exists(value) and not os.path.isdir(value):
+            return value
+        raise ValueError
+
+    @classmethod
+    def convert_dir(cls, param, value):
+        if os.path.isdir(value):
+            return value
+        raise ValueError
+
+    @classmethod
+    def convert(cls, param, value):
+        try:
+            if hasattr(cls, 'convert_'+param.ptype):
+                mthd = getattr(cls, 'convert_'+param.ptype)
+            else:
+                mthd = cls.convert_string
+            return mthd(param, value)
+        except Exception:
+            raise ValueError('Unable to convert value (%s)' % value)
+
+class Param(Converter):
+
+    def __init__(self, name=None, ptype='string', optional=True,
+                 short_name=None, long_name=None, doc='',
+                 metavar=None, cardinality=1, default=None,
+                 choices=None, encoder=None, request_param=True):
+        self.name = name
+        self.ptype = ptype
+        self.optional = optional
+        self.short_name = short_name
+        self.long_name = long_name
+        self.doc = doc
+        self.metavar = metavar
+        self.cardinality = cardinality
+        self.default = default
+        self.choices = choices
+        self.encoder = encoder
+        self.request_param = request_param
+
+    @property
+    def optparse_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '--%s' % self.long_name
+        return ln
+
+    @property
+    def synopsis_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '--%s' % self.long_name
+        return ln
+
+    @property
+    def getopt_long_name(self):
+        ln = None
+        if self.long_name:
+            ln = '%s' % self.long_name
+            if self.ptype != 'boolean':
+                ln += '='
+        return ln
+
+    @property
+    def optparse_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '-%s' % self.short_name
+        return sn
+
+    @property
+    def synopsis_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '-%s' % self.short_name
+        return sn
+
+    @property
+    def getopt_short_name(self):
+        sn = None
+        if self.short_name:
+            sn = '%s' % self.short_name
+            if self.ptype != 'boolean':
+                sn += ':'
+        return sn
+
+    def convert(self, value):
+        """
+        Convert a string value as received in the command line
+        tools and convert to the appropriate type of value.
+        Raise a ValueError if the value can't be converted.
+
+        :type value: str
+        :param value: The value to convert. This should always
+            be a string.
+        """
+        return super(Param, self).convert(self, value)
+
+
diff --git a/ext/boto/route53/__init__.py b/ext/boto/route53/__init__.py
new file mode 100644
index 0000000000..05b196f2c9
--- /dev/null
+++ b/ext/boto/route53/__init__.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+# this is here for backward compatibility
+# originally, the Route53Connection class was defined here
+from boto.route53.connection import Route53Connection
+from boto.regioninfo import RegionInfo, get_regions
+from boto.regioninfo import connect
+
+
+class Route53RegionInfo(RegionInfo):
+
+    def connect(self, **kw_params):
+        """
+        Connect to this Region's endpoint. Returns a connection
+        object pointing to the endpoint associated with this region.
+        You may pass any of the arguments accepted by the connection
+        class's constructor as keyword arguments and they will be
+        passed along to the connection object.
+
+        :rtype: Connection object
+        :return: The connection to this region's endpoint
+        """
+        if self.connection_cls:
+            return self.connection_cls(host=self.endpoint, **kw_params)
+
+
+def regions():
+    """
+    Get all available regions for the Route53 service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
+    """
+    regions = get_regions(
+        'route53',
+        region_cls=Route53RegionInfo,
+        connection_cls=Route53Connection
+    )
+
+    # For historical reasons, we had a "universal" endpoint as well.
+    regions.append(
+        Route53RegionInfo(
+            name='universal',
+            endpoint='route53.amazonaws.com',
+            connection_cls=Route53Connection
+        )
+    )
+
+    return regions
+
+
+def connect_to_region(region_name, **kw_params):
+    """
+    Given a valid region name, return a
+    :class:`boto.route53.connection.Route53Connection`.
+
+    :type region_name: str
+    :param region_name: The name of the region to connect to.
+
+    :rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+        name is given
+    """
+    if region_name == 'universal':
+        region = Route53RegionInfo(
+            name='universal',
+            endpoint='route53.amazonaws.com',
+            connection_cls=Route53Connection
+        )
+        return region.connect(**kw_params)
+
+    return connect('route53', region_name, region_cls=Route53RegionInfo,
+                   connection_cls=Route53Connection, **kw_params)
diff --git a/ext/boto/route53/connection.py b/ext/boto/route53/connection.py
new file mode 100644
index 0000000000..a7e7377924
--- /dev/null
+++ b/ext/boto/route53/connection.py
@@ -0,0 +1,613 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.route53 import exception
+import random
+import uuid
+import xml.sax
+
+import boto
+from boto.connection import AWSAuthConnection
+from boto import handler
+import boto.jsonresponse
+from boto.route53.record import ResourceRecordSets
+from boto.route53.zone import Zone
+from boto.compat import six, urllib
+
+
+HZXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
+HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
+<CreateHostedZoneRequest xmlns="%(xmlns)s">
+  <Name>%(name)s</Name>
+  <VPC>
+    <VPCId>%(vpc_id)s</VPCId>
+    <VPCRegion>%(vpc_region)s</VPCRegion>
+  </VPC>
+  <CallerReference>%(caller_ref)s</CallerReference>
+  <HostedZoneConfig>
+    <Comment>%(comment)s</Comment>
+  </HostedZoneConfig>
+</CreateHostedZoneRequest>"""
+
+# boto.set_stream_logger('dns')
+
+
+class Route53Connection(AWSAuthConnection):
+    DefaultHost = 'route53.amazonaws.com'
+    """The default Route53 API endpoint to connect to."""
+
+    Version = '2013-04-01'
+    """Route53 API version."""
+
+    XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
+    """XML schema for this Route53 API version."""
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 port=None, proxy=None, proxy_port=None,
+                 host=DefaultHost, debug=0, security_token=None,
+                 validate_certs=True, https_connection_factory=None,
+                 profile_name=None):
+        super(Route53Connection, self).__init__(
+            host,
+            aws_access_key_id, aws_secret_access_key,
+            True, port, proxy, proxy_port, debug=debug,
+            security_token=security_token,
+            validate_certs=validate_certs,
+            https_connection_factory=https_connection_factory,
+            profile_name=profile_name)
+
+    def _required_auth_capability(self):
+        return ['route53']
+
+    def make_request(self, action, path, headers=None, data='', params=None):
+        if params:
+            pairs = []
+            for key, val in six.iteritems(params):
+                if val is None:
+                    continue
+                pairs.append(key + '=' + urllib.parse.quote(str(val)))
+            path += '?' + '&'.join(pairs)
+        return super(Route53Connection, self).make_request(
+            action, path, headers, data,
+            retry_handler=self._retry_handler)
+
+    # Hosted Zones
+
+    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
+        """
+        Returns a Python data structure with information about all
+        Hosted Zones defined for the AWS account.
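+
+        If the response is truncated, this method follows the
+        ``NextMarker`` from each page and recursively fetches the rest,
+        so the returned structure covers every hosted zone.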
+
+        :param int start_marker: start marker to pass when fetching additional
+            results after a truncated list
+        :param list zone_list: a HostedZones list to prepend to results
+        """
+        params = {}
+        if start_marker:
+            params = {'marker': start_marker}
+        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
+                                     params=params)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='HostedZones',
+                                      item_marker=('HostedZone',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        if zone_list:
+            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
+        while 'NextMarker' in e['ListHostedZonesResponse']:
+            next_marker = e['ListHostedZonesResponse']['NextMarker']
+            zone_list = e['ListHostedZonesResponse']['HostedZones']
+            e = self.get_all_hosted_zones(next_marker, zone_list)
+        return e
+
+    def get_hosted_zone(self, hosted_zone_id):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The unique identifier for the Hosted Zone
+
+        """
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('GET', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element(list_marker='NameServers',
+                                      item_marker=('NameServer',))
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
+    def get_hosted_zone_by_name(self, hosted_zone_name):
+        """
+        Get detailed information about a particular Hosted Zone.
+
+        :type hosted_zone_name: str
+        :param hosted_zone_name: The fully qualified domain name for the Hosted
+            Zone
+
+        """
+        if hosted_zone_name[-1] != '.':
+            hosted_zone_name += '.'
+        all_hosted_zones = self.get_all_hosted_zones()
+        for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
+            # check that they gave us the FQDN for their zone
+            if zone['Name'] == hosted_zone_name:
+                return self.get_hosted_zone(zone['Id'].split('/')[-1])
+
+    def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
+                           private_zone=False, vpc_id=None, vpc_region=None):
+        """
+        Create a new Hosted Zone. Returns a Python data structure with
+        information about the newly created Hosted Zone.
+
+        :type domain_name: str
+        :param domain_name: The name of the domain. This should be a
+            fully-specified domain, and should end with a final period
+            as the last label indication. If you omit the final period,
+            Amazon Route 53 assumes the domain is relative to the root.
+            This is the name you have registered with your DNS registrar.
+            It is also the name you will delegate from your registrar to
+            the Amazon Route 53 delegation servers returned in
+            response to this request.
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHostedZone requests to be retried
+            without the risk of executing the operation twice. If you don't
+            provide a value for this, boto will generate a Type 4 UUID and
+            use that.
+
+        :type comment: str
+        :param comment: Any comments you want to include about the hosted
+            zone.
+
+        :type private_zone: bool
+        :param private_zone: Set True if creating a private hosted zone.
+
+        :type vpc_id: str
+        :param vpc_id: When creating a private hosted zone, the VPC Id to
+            associate to is required.
+
+        :type vpc_region: str
+        :param vpc_region: When creating a private hosted zone, the region
+            of the associated VPC is required.
+
+        """
+        if caller_ref is None:
+            caller_ref = str(uuid.uuid4())
+        if private_zone:
+            params = {'name': domain_name,
+                      'caller_ref': caller_ref,
+                      'comment': comment,
+                      'vpc_id': vpc_id,
+                      'vpc_region': vpc_region,
+                      'xmlns': self.XMLNameSpace}
+            xml_body = HZPXML % params
+        else:
+            params = {'name': domain_name,
+                      'caller_ref': caller_ref,
+                      'comment': comment,
+                      'xmlns': self.XMLNameSpace}
+            xml_body = HZXML % params
+        uri = '/%s/hostedzone' % self.Version
+        response = self.make_request('POST', uri,
+                                     {'Content-Type': 'text/xml'}, xml_body)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 201:
+            e = boto.jsonresponse.Element(list_marker='NameServers',
+                                          item_marker=('NameServer',))
+            h = boto.jsonresponse.XmlHandler(e, None)
+            h.parse(body)
+            return e
+        else:
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+
+    def delete_hosted_zone(self, hosted_zone_id):
+        """
+        Delete the hosted zone specified by the given id.
+
+        :type hosted_zone_id: str
+        :param hosted_zone_id: The hosted zone's id
+
+        """
+        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
+        response = self.make_request('DELETE', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status not in (200, 204):
+            raise exception.DNSServerError(response.status,
+                                           response.reason,
+                                           body)
+        e = boto.jsonresponse.Element()
+        h = boto.jsonresponse.XmlHandler(e, None)
+        h.parse(body)
+        return e
+
+    # Health checks
+
+    POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
+        <CallerReference>%(caller_ref)s</CallerReference>
+        %(health_check)s
+    </CreateHealthCheckRequest>"""
+
+    def create_health_check(self, health_check, caller_ref=None):
+        """
+        Create a new Health Check
+
+        :type health_check: HealthCheck
+        :param health_check: HealthCheck object
+
+        :type caller_ref: str
+        :param caller_ref: A unique string that identifies the request
+            and that allows failed CreateHealthCheckRequest requests to be retried
+            without the risk of executing the operation twice. If you don't
+            provide a value for this, boto will generate a Type 4 UUID and
+            use that.
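+
+        Example (a sketch; ``HealthCheck`` comes from
+        ``boto.route53.healthcheck`` and the values are hypothetical)::
+
+            from boto.route53.healthcheck import HealthCheck
+            hc = HealthCheck(ip_addr='192.0.2.10', port=80, hc_type='HTTP',
+                             resource_path='/health')
+            conn.create_health_check(hc)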
+ + """ + if caller_ref is None: + caller_ref = str(uuid.uuid4()) + uri = '/%s/healthcheck' % self.Version + params = {'xmlns': self.XMLNameSpace, + 'caller_ref': caller_ref, + 'health_check': health_check.to_xml() + } + xml_body = self.POSTHCXMLBody % params + response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body) + body = response.read() + boto.log.debug(body) + if response.status == 201: + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + raise exception.DNSServerError(response.status, response.reason, body) + + def get_list_health_checks(self, maxitems=None, marker=None): + """ + Return a list of health checks + + :type maxitems: int + :param maxitems: Maximum number of items to return + + :type marker: str + :param marker: marker to get next set of items to list + + """ + + params = {} + if maxitems is not None: + params['maxitems'] = maxitems + if marker is not None: + params['marker'] = marker + + uri = '/%s/healthcheck' % (self.Version, ) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='HealthChecks', + item_marker=('HealthCheck',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def get_checker_ip_ranges(self): + """ + Return a list of Route53 healthcheck IP ranges + """ + uri = '/%s/checkeripranges' % self.Version + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',)) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def delete_health_check(self, health_check_id): + """ + Delete a health check + + :type health_check_id: str + :param health_check_id: ID of the health check to delete + + """ + uri = '/%s/healthcheck/%s' % (self.Version, health_check_id) + response = self.make_request('DELETE', uri) + body = response.read() + boto.log.debug(body) + if response.status not in (200, 204): + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + # Resource Record Sets + + def get_all_rrsets(self, hosted_zone_id, type=None, + name=None, identifier=None, maxitems=None): + """ + Retrieve the Resource Record Sets defined for this Hosted Zone. + Returns the raw XML data returned by the Route53 call. + + :type hosted_zone_id: str + :param hosted_zone_id: The unique identifier for the Hosted Zone + + :type type: str + :param type: The type of resource record set to begin the record + listing from. 
Valid choices are: + + * A + * AAAA + * CNAME + * MX + * NS + * PTR + * SOA + * SPF + * SRV + * TXT + + Valid values for weighted resource record sets: + + * A + * AAAA + * CNAME + * TXT + + Valid values for Zone Apex Aliases: + + * A + * AAAA + + :type name: str + :param name: The first name in the lexicographic ordering of domain + names to be retrieved + + :type identifier: str + :param identifier: In a hosted zone that includes weighted resource + record sets (multiple resource record sets with the same DNS + name and type that are differentiated only by SetIdentifier), + if results were truncated for a given DNS name and type, + the value of SetIdentifier for the next resource record + set that has the current DNS name and type + + :type maxitems: int + :param maxitems: The maximum number of records + + """ + params = {'type': type, 'name': name, + 'identifier': identifier, 'maxitems': maxitems} + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('GET', uri, params=params) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def change_rrsets(self, hosted_zone_id, xml_body): + """ + Create or change the authoritative DNS information for this + Hosted Zone. + Returns a Python data structure with information about the set of + changes, including the Change ID. + + :type hosted_zone_id: str + :param hosted_zone_id: The unique identifier for the Hosted Zone + + :type xml_body: str + :param xml_body: The list of changes to be made, defined in the + XML schema defined by the Route53 service. + + """ + uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id) + response = self.make_request('POST', uri, + {'Content-Type': 'text/xml'}, + xml_body) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def get_change(self, change_id): + """ + Get information about a proposed set of changes, as submitted + by the change_rrsets method. + Returns a Python data structure with status information about the + changes. + + :type change_id: str + :param change_id: The unique identifier for the set of changes. + This ID is returned in the response to the change_rrsets method. + + """ + uri = '/%s/change/%s' % (self.Version, change_id) + response = self.make_request('GET', uri) + body = response.read() + boto.log.debug(body) + if response.status >= 300: + raise exception.DNSServerError(response.status, + response.reason, + body) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + + def create_zone(self, name, private_zone=False, + vpc_id=None, vpc_region=None): + """ + Create a new Hosted Zone. Returns a Zone object for the newly + created Hosted Zone. + + :type name: str + :param name: The name of the domain. This should be a + fully-specified domain, and should end with a final period + as the last label indication. If you omit the final period, + Amazon Route 53 assumes the domain is relative to the root. + This is the name you have registered with your DNS registrar. 
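get_change is the other half of change_rrsets: a freshly submitted change batch reports PENDING and flips to INSYNC once it has propagated to all Route53 name servers. A small polling helper, sketched against the GetChangeResponse shape this module uses elsewhere (the delay and attempt counts are arbitrary choices):

    import time

    def wait_for_insync(conn, change_id, delay=10, max_attempts=30):
        """Poll get_change until the submitted batch reports INSYNC."""
        for _ in range(max_attempts):
            info = conn.get_change(change_id)
            if info['GetChangeResponse']['ChangeInfo']['Status'] == 'INSYNC':
                return info
            time.sleep(delay)
        raise RuntimeError('change %s did not sync in time' % change_id)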
+ It is also the name you will delegate from your registrar to + the Amazon Route 53 delegation servers returned in + response to this request. + + :type private_zone: bool + :param private_zone: Set True if creating a private hosted zone. + + :type vpc_id: str + :param vpc_id: When creating a private hosted zone, the VPC Id to + associate to is required. + + :type vpc_region: str + :param vpc_region: When creating a private hosted zone, the region + of the associated VPC is required. + """ + zone = self.create_hosted_zone(name, private_zone=private_zone, + vpc_id=vpc_id, vpc_region=vpc_region) + return Zone(self, zone['CreateHostedZoneResponse']['HostedZone']) + + def get_zone(self, name): + """ + Returns a Zone object for the specified Hosted Zone. + + :param name: The name of the domain. This should be a + fully-specified domain, and should end with a final period + as the last label indication. + """ + name = self._make_qualified(name) + for zone in self.get_zones(): + if name == zone.name: + return zone + + def get_zones(self): + """ + Returns a list of Zone objects, one for each of the Hosted + Zones defined for the AWS account. + + :rtype: list + :returns: A list of Zone objects. + + """ + zones = self.get_all_hosted_zones() + return [Zone(self, zone) for zone in + zones['ListHostedZonesResponse']['HostedZones']] + + def _make_qualified(self, value): + """ + Ensure passed domain names end in a period (.) character. + This will usually make a domain fully qualified. + """ + if type(value) in [list, tuple, set]: + new_list = [] + for record in value: + if record and not record[-1] == '.': + new_list.append("%s." % record) + else: + new_list.append(record) + return new_list + else: + value = value.strip() + if value and not value[-1] == '.': + value = "%s." % value + return value + + def _retry_handler(self, response, i, next_sleep): + status = None + boto.log.debug("Saw HTTP status: %s" % response.status) + + if response.status == 400: + body = response.read() + + # We need to parse the error first + err = exception.DNSServerError( + response.status, + response.reason, + body) + if err.error_code: + # This is a case where we need to ignore a 400 error, as + # Route53 returns this. See + # http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html + if not err.error_code in ( + 'PriorRequestNotComplete', + 'Throttling', + 'ServiceUnavailable', + 'RequestExpired'): + return status + msg = "%s, retry attempt %s" % ( + err.error_code, + i + ) + next_sleep = min(random.random() * (2 ** i), + boto.config.get('Boto', 'max_retry_delay', 60)) + i += 1 + status = (msg, i, next_sleep) + + return status diff --git a/ext/boto/route53/domains/__init__.py b/ext/boto/route53/domains/__init__.py new file mode 100644 index 0000000000..7554494b32 --- /dev/null +++ b/ext/boto/route53/domains/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
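Note how the retry handler above treats PriorRequestNotComplete, Throttling, ServiceUnavailable, and RequestExpired as retryable even though Route53 delivers some of them as HTTP 400s, and how it backs off with truncated exponential jitter. The sleep it computes for attempt i behaves like this (60 seconds is boto's default max_retry_delay):

    import random

    # Truncated exponential backoff with full jitter, as in _retry_handler:
    # attempt i sleeps a random amount in [0, 2**i), capped at the delay.
    max_retry_delay = 60
    for i in range(8):
        next_sleep = min(random.random() * (2 ** i), max_retry_delay)
        print('attempt %d: sleeping %.2fs' % (i, next_sleep))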
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Route 53 Domains service. + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.route53.domains.layer1 import Route53DomainsConnection + return get_regions('route53domains', + connection_cls=Route53DomainsConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.route53.domains.layer1 import Route53DomainsConnection + return connect('route53domains', region_name, + connection_cls=Route53DomainsConnection, **kw_params) diff --git a/ext/boto/route53/domains/exceptions.py b/ext/boto/route53/domains/exceptions.py new file mode 100644 index 0000000000..51eb673003 --- /dev/null +++ b/ext/boto/route53/domains/exceptions.py @@ -0,0 +1,46 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import BotoServerError + + +class DuplicateRequest(BotoServerError): + pass + + +class DomainLimitExceeded(BotoServerError): + pass + + +class InvalidInput(BotoServerError): + pass + + +class OperationLimitExceeded(BotoServerError): + pass + + +class UnsupportedTLD(BotoServerError): + pass + + +class TLDRulesViolation(BotoServerError): + pass diff --git a/ext/boto/route53/domains/layer1.py b/ext/boto/route53/domains/layer1.py new file mode 100644 index 0000000000..537948f7b7 --- /dev/null +++ b/ext/boto/route53/domains/layer1.py @@ -0,0 +1,868 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.compat import json +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.route53.domains import exceptions + + +class Route53DomainsConnection(AWSQueryConnection): + """ + + """ + APIVersion = "2014-05-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "route53domains.us-east-1.amazonaws.com" + ServiceName = "Route53Domains" + TargetPrefix = "Route53Domains_v20140515" + ResponseError = JSONResponseError + + _faults = { + "DuplicateRequest": exceptions.DuplicateRequest, + "DomainLimitExceeded": exceptions.DomainLimitExceeded, + "InvalidInput": exceptions.InvalidInput, + "OperationLimitExceeded": exceptions.OperationLimitExceeded, + "UnsupportedTLD": exceptions.UnsupportedTLD, + "TLDRulesViolation": exceptions.TLDRulesViolation, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(Route53DomainsConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def check_domain_availability(self, domain_name, idn_lang_code=None): + """ + This operation checks the availability of one domain name. You + can access this API without authenticating. Note that if the + availability status of a domain is pending, you must submit + another request to determine the availability of the domain + name. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type idn_lang_code: string + :param idn_lang_code: Reserved for future use. + + """ + params = {'DomainName': domain_name, } + if idn_lang_code is not None: + params['IdnLangCode'] = idn_lang_code + return self.make_request(action='CheckDomainAvailability', + body=json.dumps(params)) + + def disable_domain_transfer_lock(self, domain_name): + """ + This operation removes the transfer lock on the domain + (specifically the `clientTransferProhibited` status) to allow + domain transfers. 
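Because make_request on this connection decodes the JSON body, callers get plain dicts back. The availability note in check_domain_availability matters in practice: AVAILABLE and UNAVAILABLE are final, but PENDING means the registry has not decided and you must ask again. A sketch (the Availability response field follows the Route 53 Domains API and is an assumption here, as is the domain name):

    import time

    from boto.route53.domains import connect_to_region

    conn = connect_to_region('us-east-1')

    # PENDING is not an answer; re-poll until the registry decides.
    while True:
        resp = conn.check_domain_availability('example-name.com')
        if resp.get('Availability') != 'PENDING':  # assumed response field
            break
        time.sleep(5)
    print(resp)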
We recommend you refrain from performing + this action unless you intend to transfer the domain to a + different registrar. Successful submission returns an + operation ID that you can use to track the progress and + completion of the action. If the request is not completed + successfully, the domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='DisableDomainTransferLock', + body=json.dumps(params)) + + def enable_domain_transfer_lock(self, domain_name): + """ + This operation sets the transfer lock on the domain + (specifically the `clientTransferProhibited` status) to + prevent domain transfers. Successful submission returns an + operation ID that you can use to track the progress and + completion of the action. If the request is not completed + successfully, the domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='EnableDomainTransferLock', + body=json.dumps(params)) + + def get_domain_detail(self, domain_name): + """ + This operation returns detailed information about the domain. + The domain's contact information is also returned as part of + the output. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='GetDomainDetail', + body=json.dumps(params)) + + def get_operation_detail(self, operation_id): + """ + This operation returns the current status of an operation that + is not completed. + + :type operation_id: string + :param operation_id: The identifier for the operation for which you + want to get the status. Amazon Route 53 returned the identifier in + the response to the original request. + Type: String + + Default: None + + Required: Yes + + """ + params = {'OperationId': operation_id, } + return self.make_request(action='GetOperationDetail', + body=json.dumps(params)) + + def list_domains(self, marker=None, max_items=None): + """ + This operation returns all the domain names registered with + Amazon Route 53 for the current AWS account. + + :type marker: string + :param marker: For an initial request for a list of domains, omit this + element. If the number of domains that are associated with the + current AWS account is greater than the value that you specified + for `MaxItems`, you can use `Marker` to return additional domains. + Get the value of `NextPageMarker` from the previous response, and + submit another request that includes the value of `NextPageMarker` + in the `Marker` element. + Type: String + + Default: None + + Constraints: The marker must match the value specified in the previous + request. 
+ + Required: No + + :type max_items: integer + :param max_items: Number of domains to be returned. + Type: Integer + + Default: 20 + + Constraints: A numeral between 1 and 100. + + Required: No + + """ + params = {} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.make_request(action='ListDomains', + body=json.dumps(params)) + + def list_operations(self, marker=None, max_items=None): + """ + This operation returns the operation IDs of operations that + are not yet complete. + + :type marker: string + :param marker: For an initial request for a list of operations, omit + this element. If the number of operations that are not yet complete + is greater than the value that you specified for `MaxItems`, you + can use `Marker` to return additional operations. Get the value of + `NextPageMarker` from the previous response, and submit another + request that includes the value of `NextPageMarker` in the `Marker` + element. + Type: String + + Default: None + + Required: No + + :type max_items: integer + :param max_items: Number of domains to be returned. + Type: Integer + + Default: 20 + + Constraints: A value between 1 and 100. + + Required: No + + """ + params = {} + if marker is not None: + params['Marker'] = marker + if max_items is not None: + params['MaxItems'] = max_items + return self.make_request(action='ListOperations', + body=json.dumps(params)) + + def register_domain(self, domain_name, duration_in_years, admin_contact, + registrant_contact, tech_contact, idn_lang_code=None, + auto_renew=None, privacy_protect_admin_contact=None, + privacy_protect_registrant_contact=None, + privacy_protect_tech_contact=None): + """ + This operation registers a domain. Domains are registered by + the AWS registrar partner, Gandi. For some top-level domains + (TLDs), this operation requires extra parameters. + + When you register a domain, Amazon Route 53 does the + following: + + + + Creates a Amazon Route 53 hosted zone that has the same name + as the domain. Amazon Route 53 assigns four name servers to + your hosted zone and automatically updates your domain + registration with the names of these name servers. + + Enables autorenew, so your domain registration will renew + automatically each year. We'll notify you in advance of the + renewal date so you can choose whether to renew the + registration. + + Optionally enables privacy protection, so WHOIS queries + return contact information for our registrar partner, Gandi, + instead of the information you entered for registrant, admin, + and tech contacts. + + If registration is successful, returns an operation ID that + you can use to track the progress and completion of the + action. If the request is not completed successfully, the + domain registrant is notified by email. + + Charges your AWS account an amount based on the top-level + domain. For more information, see `Amazon Route 53 Pricing`_. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type idn_lang_code: string + :param idn_lang_code: Reserved for future use. + + :type duration_in_years: integer + :param duration_in_years: The number of years the domain will be + registered. Domains are registered for a minimum of one year. 
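Both list_domains and list_operations page the same way: a truncated response carries NextPageMarker, which becomes the Marker of the next request. A sketch of walking every page (the Domains response key is taken from the Route 53 Domains API and should be treated as an assumption):

    from boto.route53.domains import connect_to_region

    conn = connect_to_region('us-east-1')

    domains, marker = [], None
    while True:
        page = conn.list_domains(marker=marker, max_items=100)
        domains.extend(page.get('Domains', []))  # assumed response key
        marker = page.get('NextPageMarker')
        if not marker:  # absent once the last page is reached
            break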
The + maximum period depends on the top-level domain. + Type: Integer + + Default: 1 + + Valid values: Integer from 1 to 10 + + Required: Yes + + :type auto_renew: boolean + :param auto_renew: Indicates whether the domain will be automatically + renewed ( `True`) or not ( `False`). Autorenewal only takes effect + after the account is charged. + Type: Boolean + + Valid values: `True` | `False` + + Default: `True` + + Required: No + + :type admin_contact: dict + :param admin_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type registrant_contact: dict + :param registrant_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type tech_contact: dict + :param tech_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type privacy_protect_admin_contact: boolean + :param privacy_protect_admin_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_registrant_contact: boolean + :param privacy_protect_registrant_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_tech_contact: boolean + :param privacy_protect_tech_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. 
+ Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + """ + params = { + 'DomainName': domain_name, + 'DurationInYears': duration_in_years, + 'AdminContact': admin_contact, + 'RegistrantContact': registrant_contact, + 'TechContact': tech_contact, + } + if idn_lang_code is not None: + params['IdnLangCode'] = idn_lang_code + if auto_renew is not None: + params['AutoRenew'] = auto_renew + if privacy_protect_admin_contact is not None: + params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact + if privacy_protect_registrant_contact is not None: + params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact + if privacy_protect_tech_contact is not None: + params['PrivacyProtectTechContact'] = privacy_protect_tech_contact + return self.make_request(action='RegisterDomain', + body=json.dumps(params)) + + def retrieve_domain_auth_code(self, domain_name): + """ + This operation returns the AuthCode for the domain. To + transfer a domain to another registrar, you provide this value + to the new registrar. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + """ + params = {'DomainName': domain_name, } + return self.make_request(action='RetrieveDomainAuthCode', + body=json.dumps(params)) + + def transfer_domain(self, domain_name, duration_in_years, nameservers, + admin_contact, registrant_contact, tech_contact, + idn_lang_code=None, auth_code=None, auto_renew=None, + privacy_protect_admin_contact=None, + privacy_protect_registrant_contact=None, + privacy_protect_tech_contact=None): + """ + This operation transfers a domain from another registrar to + Amazon Route 53. Domains are registered by the AWS registrar, + Gandi upon transfer. + + To transfer a domain, you need to meet all the domain transfer + criteria, including the following: + + + + You must supply nameservers to transfer a domain. + + You must disable the domain transfer lock (if any) before + transferring the domain. + + A minimum of 60 days must have elapsed since the domain's + registration or last transfer. + + + We recommend you use the Amazon Route 53 as the DNS service + for your domain. You can create a hosted zone in Amazon Route + 53 for your current domain before transferring your domain. + + Note that upon transfer, the domain duration is extended for a + year if not otherwise specified. Autorenew is enabled by + default. + + If the transfer is successful, this method returns an + operation ID that you can use to track the progress and + completion of the action. If the request is not completed + successfully, the domain registrant will be notified by email. + + Transferring domains charges your AWS account an amount based + on the top-level domain. For more information, see `Amazon + Route 53 Pricing`_. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type idn_lang_code: string + :param idn_lang_code: Reserved for future use. + + :type duration_in_years: integer + :param duration_in_years: The number of years the domain will be + registered. 
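The three contact parameters are plain dicts serialized straight into the JSON request body, keyed with the child names listed in the docstring. A hedged sketch of a registration call (the contact values are illustrative, and the OperationId response field is an assumption based on the docstring's "returns an operation ID"):

    contact = {
        'FirstName': 'Jane', 'LastName': 'Doe',
        'ContactType': 'PERSON',
        'AddressLine1': '123 Any Street', 'City': 'Seattle',
        'State': 'WA', 'CountryCode': 'US', 'ZipCode': '98101',
        'PhoneNumber': '+1.2065550100', 'Email': 'jane@example.com',
    }
    result = conn.register_domain(
        'example-name.com', duration_in_years=1,
        admin_contact=contact, registrant_contact=contact,
        tech_contact=contact)
    operation_id = result['OperationId']  # assumed response field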
Domains are registered for a minimum of one year. The + maximum period depends on the top-level domain. + Type: Integer + + Default: 1 + + Valid values: Integer from 1 to 10 + + Required: Yes + + :type nameservers: list + :param nameservers: Contains details for the host and glue IP + addresses. + Type: Complex + + Children: `GlueIps`, `Name` + + :type auth_code: string + :param auth_code: The authorization code for the domain. You get this + value from the current registrar. + Type: String + + Required: Yes + + :type auto_renew: boolean + :param auto_renew: Indicates whether the domain will be automatically + renewed (true) or not (false). Autorenewal only takes effect after + the account is charged. + Type: Boolean + + Valid values: `True` | `False` + + Default: true + + Required: No + + :type admin_contact: dict + :param admin_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type registrant_contact: dict + :param registrant_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type tech_contact: dict + :param tech_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type privacy_protect_admin_contact: boolean + :param privacy_protect_admin_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_registrant_contact: boolean + :param privacy_protect_registrant_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. + Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + :type privacy_protect_tech_contact: boolean + :param privacy_protect_tech_contact: Whether you want to conceal + contact information from WHOIS queries. If you specify true, WHOIS + ("who is") queries will return contact information for our + registrar partner, Gandi, instead of the contact information that + you enter. 
+ Type: Boolean + + Default: `True` + + Valid values: `True` | `False` + + Required: No + + """ + params = { + 'DomainName': domain_name, + 'DurationInYears': duration_in_years, + 'Nameservers': nameservers, + 'AdminContact': admin_contact, + 'RegistrantContact': registrant_contact, + 'TechContact': tech_contact, + } + if idn_lang_code is not None: + params['IdnLangCode'] = idn_lang_code + if auth_code is not None: + params['AuthCode'] = auth_code + if auto_renew is not None: + params['AutoRenew'] = auto_renew + if privacy_protect_admin_contact is not None: + params['PrivacyProtectAdminContact'] = privacy_protect_admin_contact + if privacy_protect_registrant_contact is not None: + params['PrivacyProtectRegistrantContact'] = privacy_protect_registrant_contact + if privacy_protect_tech_contact is not None: + params['PrivacyProtectTechContact'] = privacy_protect_tech_contact + return self.make_request(action='TransferDomain', + body=json.dumps(params)) + + def update_domain_contact(self, domain_name, admin_contact=None, + registrant_contact=None, tech_contact=None): + """ + This operation updates the contact information for a + particular domain. Information for at least one contact + (registrant, administrator, or technical) must be supplied for + update. + + If the update is successful, this method returns an operation + ID that you can use to track the progress and completion of + the action. If the request is not completed successfully, the + domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type admin_contact: dict + :param admin_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type registrant_contact: dict + :param registrant_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + :type tech_contact: dict + :param tech_contact: Provides detailed contact information. + Type: Complex + + Children: `FirstName`, `MiddleName`, `LastName`, `ContactType`, + `OrganizationName`, `AddressLine1`, `AddressLine2`, `City`, + `State`, `CountryCode`, `ZipCode`, `PhoneNumber`, `Email`, `Fax`, + `ExtraParams` + + Required: Yes + + """ + params = {'DomainName': domain_name, } + if admin_contact is not None: + params['AdminContact'] = admin_contact + if registrant_contact is not None: + params['RegistrantContact'] = registrant_contact + if tech_contact is not None: + params['TechContact'] = tech_contact + return self.make_request(action='UpdateDomainContact', + body=json.dumps(params)) + + def update_domain_contact_privacy(self, domain_name, admin_privacy=None, + registrant_privacy=None, + tech_privacy=None): + """ + This operation updates the specified domain contact's privacy + setting. When the privacy option is enabled, personal + information such as postal or email address is hidden from the + results of a public WHOIS query. 
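Domain operations are asynchronous: each mutating call hands back an operation ID, and get_operation_detail reports its progress. A short sketch, with the OperationId and Status response fields treated as assumptions:

    result = conn.update_domain_contact_privacy(
        'example-name.com',
        admin_privacy=True, registrant_privacy=True, tech_privacy=True)
    detail = conn.get_operation_detail(result['OperationId'])  # assumed field
    print(detail.get('Status'))  # e.g. IN_PROGRESS or SUCCESSFUL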
The privacy services are + provided by the AWS registrar, Gandi. For more information, + see the `Gandi privacy features`_. + + This operation only affects the privacy of the specified + contact type (registrant, administrator, or tech). Successful + acceptance returns an operation ID that you can use with + GetOperationDetail to track the progress and completion of the + action. If the request is not completed successfully, the + domain registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type admin_privacy: boolean + :param admin_privacy: Whether you want to conceal contact information + from WHOIS queries. If you specify true, WHOIS ("who is") queries + will return contact information for our registrar partner, Gandi, + instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + :type registrant_privacy: boolean + :param registrant_privacy: Whether you want to conceal contact + information from WHOIS queries. If you specify true, WHOIS ("who + is") queries will return contact information for our registrar + partner, Gandi, instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + :type tech_privacy: boolean + :param tech_privacy: Whether you want to conceal contact information + from WHOIS queries. If you specify true, WHOIS ("who is") queries + will return contact information for our registrar partner, Gandi, + instead of the contact information that you enter. + Type: Boolean + + Default: None + + Valid values: `True` | `False` + + Required: No + + """ + params = {'DomainName': domain_name, } + if admin_privacy is not None: + params['AdminPrivacy'] = admin_privacy + if registrant_privacy is not None: + params['RegistrantPrivacy'] = registrant_privacy + if tech_privacy is not None: + params['TechPrivacy'] = tech_privacy + return self.make_request(action='UpdateDomainContactPrivacy', + body=json.dumps(params)) + + def update_domain_nameservers(self, domain_name, nameservers): + """ + This operation replaces the current set of name servers for + the domain with the specified set of name servers. If you use + Amazon Route 53 as your DNS service, specify the four name + servers in the delegation set for the hosted zone for the + domain. + + If successful, this operation returns an operation ID that you + can use to track the progress and completion of the action. If + the request is not completed successfully, the domain + registrant will be notified by email. + + :type domain_name: string + :param domain_name: The name of a domain. + Type: String + + Default: None + + Constraints: The domain name can contain only the letters a through z, + the numbers 0 through 9, and hyphen (-). Internationalized Domain + Names are not supported. + + Required: Yes + + :type nameservers: list + :param nameservers: A list of new name servers for the domain. 
+ Type: Complex + + Children: `Name`, `GlueIps` + + Required: Yes + + """ + params = { + 'DomainName': domain_name, + 'Nameservers': nameservers, + } + return self.make_request(action='UpdateDomainNameservers', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/ext/boto/route53/exception.py b/ext/boto/route53/exception.py new file mode 100644 index 0000000000..61b33b0c57 --- /dev/null +++ b/ext/boto/route53/exception.py @@ -0,0 +1,27 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import BotoServerError + + +class DNSServerError(BotoServerError): + pass diff --git a/ext/boto/route53/healthcheck.py b/ext/boto/route53/healthcheck.py new file mode 100644 index 0000000000..43fdf17924 --- /dev/null +++ b/ext/boto/route53/healthcheck.py @@ -0,0 +1,146 @@ +# Copyright (c) 2014 Tellybug, Matt Millar +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
+"""
+From http://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateHealthCheck.html
+
+POST /2013-04-01/healthcheck HTTP/1.1
+
+<?xml version="1.0" encoding="UTF-8"?>
+<CreateHealthCheckRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
+   <CallerReference>unique description</CallerReference>
+   <HealthCheckConfig>
+      <IPAddress>IP address of the endpoint to check</IPAddress>
+      <Port>port on the endpoint to check</Port>
+      <Type>HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP</Type>
+      <ResourcePath>path of the file that
+         you want Amazon Route 53 to request</ResourcePath>
+      <FullyQualifiedDomainName>domain name of the
+         endpoint to check</FullyQualifiedDomainName>
+      <SearchString>if Type is HTTP_STR_MATCH or HTTPS_STR_MATCH,
+         the string to search for in the response body
+         from the specified resource</SearchString>
+      <RequestInterval>10 | 30</RequestInterval>
+      <FailureThreshold>integer between 1 and 10</FailureThreshold>
+   </HealthCheckConfig>
+</CreateHealthCheckRequest>
+"""
+
+
+class HealthCheck(object):
+
+    """An individual health check"""
+
+    POSTXMLBody = """
+        <HealthCheckConfig>
+            %(ip_addr_part)s
+            <Port>%(port)s</Port>
+            <Type>%(type)s</Type>
+            <ResourcePath>%(resource_path)s</ResourcePath>
+            %(fqdn_part)s
+            %(string_match_part)s
+            %(request_interval)s
+            <FailureThreshold>%(failure_threshold)s</FailureThreshold>
+        </HealthCheckConfig>
+    """
+
+    XMLIpAddrPart = """<IPAddress>%(ip_addr)s</IPAddress>"""
+
+    XMLFQDNPart = """<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomainName>"""
+
+    XMLStringMatchPart = """<SearchString>%(string_match)s</SearchString>"""
+
+    XMLRequestIntervalPart = """<RequestInterval>%(request_interval)d</RequestInterval>"""
+
+    valid_request_intervals = (10, 30)
+
+    def __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3):
+        """
+        HealthCheck object
+
+        :type ip_addr: str
+        :param ip_addr: Optional IP Address
+
+        :type port: int
+        :param port: Port to check
+
+        :type hc_type: str
+        :param hc_type: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
+
+        :type resource_path: str
+        :param resource_path: Path to check
+
+        :type fqdn: str
+        :param fqdn: domain name of the endpoint to check
+
+        :type string_match: str
+        :param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
+
+        :type request_interval: int
+        :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
+
+        :type failure_threshold: int
+        :param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.
+ + """ + self.ip_addr = ip_addr + self.port = port + self.hc_type = hc_type + self.resource_path = resource_path + self.fqdn = fqdn + self.string_match = string_match + self.failure_threshold = failure_threshold + + if request_interval in self.valid_request_intervals: + self.request_interval = request_interval + else: + raise AttributeError( + "Valid values for request_interval are: %s" % + ",".join(str(i) for i in self.valid_request_intervals)) + + if failure_threshold < 1 or failure_threshold > 10: + raise AttributeError( + 'Valid values for failure_threshold are 1 - 10.') + + def to_xml(self): + params = { + 'ip_addr_part': '', + 'port': self.port, + 'type': self.hc_type, + 'resource_path': self.resource_path, + 'fqdn_part': "", + 'string_match_part': "", + 'request_interval': (self.XMLRequestIntervalPart % + {'request_interval': self.request_interval}), + 'failure_threshold': self.failure_threshold, + } + if self.fqdn is not None: + params['fqdn_part'] = self.XMLFQDNPart % {'fqdn': self.fqdn} + + if self.ip_addr: + params['ip_addr_part'] = self.XMLIpAddrPart % {'ip_addr': self.ip_addr} + + if self.string_match is not None: + params['string_match_part'] = self.XMLStringMatchPart % {'string_match': self.string_match} + + return self.POSTXMLBody % params diff --git a/ext/boto/route53/hostedzone.py b/ext/boto/route53/hostedzone.py new file mode 100644 index 0000000000..9321538266 --- /dev/null +++ b/ext/boto/route53/hostedzone.py @@ -0,0 +1,51 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
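Putting the two pieces together: a HealthCheck instance renders itself to the HealthCheckConfig XML via to_xml, and the connection's create_health_check wraps that in a CreateHealthCheckRequest. A sketch of creating a string-match check (the endpoint values are illustrative):

    import boto

    from boto.route53.healthcheck import HealthCheck

    conn = boto.connect_route53()

    hc = HealthCheck(ip_addr='192.0.2.10', port=80,
                     hc_type='HTTP_STR_MATCH', resource_path='/health',
                     fqdn='www.example.com', string_match='OK',
                     request_interval=30, failure_threshold=3)
    result = conn.create_health_check(hc)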
+# + + +class HostedZone(object): + + def __init__(self, id=None, name=None, owner=None, version=None, + caller_reference=None): + self.id = id + self.name = name + self.owner = owner + self.version = version + self.caller_reference = caller_reference + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Name': + self.name = value + elif name == 'Owner': + self.owner = value + elif name == 'Version': + self.version = value + elif name == 'CallerReference': + self.caller_reference = value + else: + setattr(self, name, value) diff --git a/ext/boto/route53/record.py b/ext/boto/route53/record.py new file mode 100644 index 0000000000..05cddce6a2 --- /dev/null +++ b/ext/boto/route53/record.py @@ -0,0 +1,374 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF'] + +from boto.resultset import ResultSet + + +class ResourceRecordSets(ResultSet): + """ + A list of resource records. + + :ivar hosted_zone_id: The ID of the hosted zone. + :ivar comment: A comment that will be stored with the change. + :ivar changes: A list of changes. 
+ """ + + ChangeResourceRecordSetsBody = """ + + + %(comment)s + %(changes)s + + """ + + ChangeXML = """ + %(action)s + %(record)s + """ + + def __init__(self, connection=None, hosted_zone_id=None, comment=None): + self.connection = connection + self.hosted_zone_id = hosted_zone_id + self.comment = comment + self.changes = [] + self.next_record_name = None + self.next_record_type = None + self.next_record_identifier = None + super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)]) + + def __repr__(self): + if self.changes: + record_list = ','.join([c.__repr__() for c in self.changes]) + else: + record_list = ','.join([record.__repr__() for record in self]) + return '%s""" + + XMLBody = """ + %(name)s + %(type)s + %(weight)s + %(body)s + %(health_check)s + """ + + WRRBody = """ + %(identifier)s + %(weight)s + """ + + RRRBody = """ + %(identifier)s + %(region)s + """ + + FailoverBody = """ + %(identifier)s + %(failover)s + """ + + ResourceRecordsBody = """ + %(ttl)s + + %(records)s + """ + + ResourceRecordBody = """ + %s + """ + + AliasBody = """ + %(hosted_zone_id)s + %(dns_name)s + %(eval_target_health)s + """ + + EvaluateTargetHealth = """%s""" + + def __init__(self, name=None, type=None, ttl=600, resource_records=None, + alias_hosted_zone_id=None, alias_dns_name=None, identifier=None, + weight=None, region=None, alias_evaluate_target_health=None, + health_check=None, failover=None): + self.name = name + self.type = type + self.ttl = ttl + if resource_records is None: + resource_records = [] + self.resource_records = resource_records + self.alias_hosted_zone_id = alias_hosted_zone_id + self.alias_dns_name = alias_dns_name + self.identifier = identifier + self.weight = weight + self.region = region + self.alias_evaluate_target_health = alias_evaluate_target_health + self.health_check = health_check + self.failover = failover + + def __repr__(self): + return '' % (self.name, self.type, self.to_print()) + + def add_value(self, value): + """Add a resource record value""" + self.resource_records.append(value) + + def set_alias(self, alias_hosted_zone_id, alias_dns_name, + alias_evaluate_target_health=False): + """Make this an alias resource record set""" + self.alias_hosted_zone_id = alias_hosted_zone_id + self.alias_dns_name = alias_dns_name + self.alias_evaluate_target_health = alias_evaluate_target_health + + def to_xml(self): + """Spit this resource record set out as XML""" + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: + # Use alias + if self.alias_evaluate_target_health is not None: + eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false') + else: + eval_target_health = "" + + body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id, + "dns_name": self.alias_dns_name, + "eval_target_health": eval_target_health} + else: + # Use resource record(s) + records = "" + + for r in self.resource_records: + records += self.ResourceRecordBody % r + + body = self.ResourceRecordsBody % { + "ttl": self.ttl, + "records": records, + } + + weight = "" + + if self.identifier is not None and self.weight is not None: + weight = self.WRRBody % {"identifier": self.identifier, + "weight": self.weight} + elif self.identifier is not None and self.region is not None: + weight = self.RRRBody % {"identifier": self.identifier, + "region": self.region} + elif self.identifier is not None and self.failover is not None: + weight = self.FailoverBody % {"identifier": self.identifier, + "failover": 
self.failover} + + health_check = "" + if self.health_check is not None: + health_check = self.HealthCheckBody % (self.health_check) + + params = { + "name": self.name, + "type": self.type, + "weight": weight, + "body": body, + "health_check": health_check + } + return self.XMLBody % params + + def to_print(self): + rr = "" + if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None: + # Show alias + rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name + if self.alias_evaluate_target_health is not None: + rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health + else: + # Show resource record(s) + rr = ",".join(self.resource_records) + + if self.identifier is not None and self.weight is not None: + rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight) + elif self.identifier is not None and self.region is not None: + rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region) + elif self.identifier is not None and self.failover is not None: + rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover) + + return rr + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'Type': + self.type = value + elif name == 'TTL': + self.ttl = value + elif name == 'Value': + self.resource_records.append(value) + elif name == 'HostedZoneId': + self.alias_hosted_zone_id = value + elif name == 'DNSName': + self.alias_dns_name = value + elif name == 'SetIdentifier': + self.identifier = value + elif name == 'EvaluateTargetHealth': + self.alias_evaluate_target_health = value.lower() == 'true' + elif name == 'Weight': + self.weight = value + elif name == 'Region': + self.region = value + elif name == 'Failover': + self.failover = value + elif name == 'HealthCheckId': + self.health_check = value + + def startElement(self, name, attrs, connection): + return None diff --git a/ext/boto/route53/status.py b/ext/boto/route53/status.py new file mode 100644 index 0000000000..782372a811 --- /dev/null +++ b/ext/boto/route53/status.py @@ -0,0 +1,42 @@ +# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton +# www.bluepines.org +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+
+class Status(object):
+    def __init__(self, route53connection, change_dict):
+        self.route53connection = route53connection
+        for key in change_dict:
+            if key == 'Id':
+                self.__setattr__(key.lower(),
+                                 change_dict[key].replace('/change/', ''))
+            else:
+                self.__setattr__(key.lower(), change_dict[key])
+
+    def update(self):
+        """ Update the status of this request."""
+        status = self.route53connection.get_change(self.id)['GetChangeResponse']['ChangeInfo']['Status']
+        self.status = status
+        return status
+
+    def __repr__(self):
+        return '<Status:%s>' % self.status
diff --git a/ext/boto/route53/zone.py b/ext/boto/route53/zone.py
new file mode 100644
index 0000000000..b21c8de409
--- /dev/null
+++ b/ext/boto/route53/zone.py
@@ -0,0 +1,419 @@
+# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
+# www.bluepines.org
+# Copyright (c) 2012 42 Lines Inc., Jim Browne
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+default_ttl = 60
+
+import copy
+from boto.exception import TooManyRecordsException
+from boto.route53.record import ResourceRecordSets
+from boto.route53.status import Status
+
+
+class Zone(object):
+    """
+    A Route53 Zone.
+
+    :ivar route53connection: A :class:`boto.route53.connection.Route53Connection` connection
+    :ivar id: The ID of the hosted zone
+    """
+    def __init__(self, route53connection, zone_dict):
+        self.route53connection = route53connection
+        for key in zone_dict:
+            if key == 'Id':
+                self.id = zone_dict['Id'].replace('/hostedzone/', '')
+            else:
+                self.__setattr__(key.lower(), zone_dict[key])
+
+    def __repr__(self):
+        return '<Zone:%s>' % self.name
+
+    def _commit(self, changes):
+        """
+        Commit a set of changes and return the ChangeInfo portion of
+        the response.
+
+        :type changes: ResourceRecordSets
+        :param changes: changes to be committed
+        """
+        response = changes.commit()
+        return response['ChangeResourceRecordSetsResponse']['ChangeInfo']
+
+    def _new_record(self, changes, resource_type, name, value, ttl, identifier,
+                    comment=""):
+        """
+        Add a CREATE change record to an existing ResourceRecordSets
+
+        :type changes: ResourceRecordSets
+        :param changes: change set to append to
+
+        :type name: str
+        :param name: The name of the resource record you want to
+            perform the action on.
+
+        :type resource_type: str
+        :param resource_type: The DNS record type
+
+        :param value: Appropriate value for resource_type
+
+        :type ttl: int
+        :param ttl: The resource record cache time to live (TTL), in seconds.
+ + :type identifier: tuple + :param identifier: A tuple for setting WRR or LBR attributes. Valid + forms are: + + * (str, int): WRR record [e.g. ('foo',10)] + * (str, str): LBR record [e.g. ('foo','us-east-1') + + :type comment: str + :param comment: A comment that will be stored with the change. + """ + weight = None + region = None + if identifier is not None: + try: + int(identifier[1]) + weight = identifier[1] + identifier = identifier[0] + except: + region = identifier[1] + identifier = identifier[0] + change = changes.add_change("CREATE", name, resource_type, ttl, + identifier=identifier, weight=weight, + region=region) + if type(value) in [list, tuple, set]: + for record in value: + change.add_value(record) + else: + change.add_value(value) + + def add_record(self, resource_type, name, value, ttl=60, identifier=None, + comment=""): + """ + Add a new record to this Zone. See _new_record for parameter + documentation. Returns a Status object. + """ + changes = ResourceRecordSets(self.route53connection, self.id, comment) + self._new_record(changes, resource_type, name, value, ttl, identifier, + comment) + return Status(self.route53connection, self._commit(changes)) + + def update_record(self, old_record, new_value, new_ttl=None, + new_identifier=None, comment=""): + """ + Update an existing record in this Zone. Returns a Status object. + + :type old_record: ResourceRecord + :param old_record: A ResourceRecord (e.g. returned by find_records) + + See _new_record for additional parameter documentation. + """ + new_ttl = new_ttl or default_ttl + record = copy.copy(old_record) + changes = ResourceRecordSets(self.route53connection, self.id, comment) + changes.add_change_record("DELETE", record) + self._new_record(changes, record.type, record.name, + new_value, new_ttl, new_identifier, comment) + return Status(self.route53connection, self._commit(changes)) + + def delete_record(self, record, comment=""): + """ + Delete one or more records from this Zone. Returns a Status object. + + :param record: A ResourceRecord (e.g. returned by + find_records) or list, tuple, or set of ResourceRecords. + + :type comment: str + :param comment: A comment that will be stored with the change. + """ + changes = ResourceRecordSets(self.route53connection, self.id, comment) + if type(record) in [list, tuple, set]: + for r in record: + changes.add_change_record("DELETE", r) + else: + changes.add_change_record("DELETE", record) + return Status(self.route53connection, self._commit(changes)) + + def add_cname(self, name, value, ttl=None, identifier=None, comment=""): + """ + Add a new CNAME record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + name = self.route53connection._make_qualified(name) + value = self.route53connection._make_qualified(value) + return self.add_record(resource_type='CNAME', + name=name, + value=value, + ttl=ttl, + identifier=identifier, + comment=comment) + + def add_a(self, name, value, ttl=None, identifier=None, comment=""): + """ + Add a new A record to this Zone. See _new_record for + parameter documentation. Returns a Status object. + """ + ttl = ttl or default_ttl + name = self.route53connection._make_qualified(name) + return self.add_record(resource_type='A', + name=name, + value=value, + ttl=ttl, + identifier=identifier, + comment=comment) + + def add_mx(self, name, records, ttl=None, identifier=None, comment=""): + """ + Add a new MX record to this Zone. See _new_record for + parameter documentation. 
Returns a Status object. + """ + ttl = ttl or default_ttl + records = self.route53connection._make_qualified(records) + return self.add_record(resource_type='MX', + name=name, + value=records, + ttl=ttl, + identifier=identifier, + comment=comment) + + def find_records(self, name, type, desired=1, all=False, identifier=None): + """ + Search this Zone for records that match given parameters. + Returns None if no results, a ResourceRecord if one result, or + a ResourceRecordSets if more than one result. + + :type name: str + :param name: The name of the records should match this parameter + + :type type: str + :param type: The type of the records should match this parameter + + :type desired: int + :param desired: The number of desired results. If the number of + matching records in the Zone exceeds the value of this parameter, + throw TooManyRecordsException + + :type all: Boolean + :param all: If true return all records that match name, type, and + identifier parameters + + :type identifier: Tuple + :param identifier: A tuple specifying WRR or LBR attributes. Valid + forms are: + + * (str, int): WRR record [e.g. ('foo',10)] + * (str, str): LBR record [e.g. ('foo','us-east-1') + + """ + name = self.route53connection._make_qualified(name) + returned = self.route53connection.get_all_rrsets(self.id, name=name, + type=type) + + # name/type for get_all_rrsets sets the starting record; they + # are not a filter + results = [] + for r in returned: + if r.name == name and r.type == type: + results.append(r) + # Is at the end of the list of matched records. No need to continue + # since the records are sorted by name and type. + else: + break + + weight = None + region = None + if identifier is not None: + try: + int(identifier[1]) + weight = identifier[1] + except: + region = identifier[1] + + if weight is not None: + results = [r for r in results if (r.weight == weight and + r.identifier == identifier[0])] + if region is not None: + results = [r for r in results if (r.region == region and + r.identifier == identifier[0])] + + if ((not all) and (len(results) > desired)): + message = "Search: name %s type %s" % (name, type) + message += "\nFound: " + message += ", ".join(["%s %s %s" % (r.name, r.type, r.to_print()) + for r in results]) + raise TooManyRecordsException(message) + elif len(results) > 1: + return results + elif len(results) == 1: + return results[0] + else: + return None + + def get_cname(self, name, all=False): + """ + Search this Zone for CNAME records that match name. + + Returns a ResourceRecord. + + If there is more than one match return all as a + ResourceRecordSets if all is True, otherwise throws + TooManyRecordsException. + """ + return self.find_records(name, 'CNAME', all=all) + + def get_a(self, name, all=False): + """ + Search this Zone for A records that match name. + + Returns a ResourceRecord. + + If there is more than one match return all as a + ResourceRecordSets if all is True, otherwise throws + TooManyRecordsException. + """ + return self.find_records(name, 'A', all=all) + + def get_mx(self, name, all=False): + """ + Search this Zone for MX records that match name. + + Returns a ResourceRecord. + + If there is more than one match return all as a + ResourceRecordSets if all is True, otherwise throws + TooManyRecordsException. + """ + return self.find_records(name, 'MX', all=all) + + def update_cname(self, name, value, ttl=None, identifier=None, comment=""): + """ + Update the given CNAME record in this Zone to a new value, ttl, + and identifier. 
Returns a Status object. + + Will throw TooManyRecordsException is name, value does not match + a single record. + """ + name = self.route53connection._make_qualified(name) + value = self.route53connection._make_qualified(value) + old_record = self.get_cname(name) + ttl = ttl or old_record.ttl + return self.update_record(old_record, + new_value=value, + new_ttl=ttl, + new_identifier=identifier, + comment=comment) + + def update_a(self, name, value, ttl=None, identifier=None, comment=""): + """ + Update the given A record in this Zone to a new value, ttl, + and identifier. Returns a Status object. + + Will throw TooManyRecordsException is name, value does not match + a single record. + """ + name = self.route53connection._make_qualified(name) + old_record = self.get_a(name) + ttl = ttl or old_record.ttl + return self.update_record(old_record, + new_value=value, + new_ttl=ttl, + new_identifier=identifier, + comment=comment) + + def update_mx(self, name, value, ttl=None, identifier=None, comment=""): + """ + Update the given MX record in this Zone to a new value, ttl, + and identifier. Returns a Status object. + + Will throw TooManyRecordsException is name, value does not match + a single record. + """ + name = self.route53connection._make_qualified(name) + value = self.route53connection._make_qualified(value) + old_record = self.get_mx(name) + ttl = ttl or old_record.ttl + return self.update_record(old_record, + new_value=value, + new_ttl=ttl, + new_identifier=identifier, + comment=comment) + + def delete_cname(self, name, identifier=None, all=False): + """ + Delete a CNAME record matching name and identifier from + this Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'CNAME', identifier=identifier, + all=all) + return self.delete_record(record) + + def delete_a(self, name, identifier=None, all=False): + """ + Delete an A record matching name and identifier from this + Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'A', identifier=identifier, + all=all) + return self.delete_record(record) + + def delete_mx(self, name, identifier=None, all=False): + """ + Delete an MX record matching name and identifier from this + Zone. Returns a Status object. + + If there is more than one match delete all matching records if + all is True, otherwise throws TooManyRecordsException. + """ + name = self.route53connection._make_qualified(name) + record = self.find_records(name, 'MX', identifier=identifier, + all=all) + return self.delete_record(record) + + def get_records(self): + """ + Return a ResourceRecordsSets for all of the records in this zone. + """ + return self.route53connection.get_all_rrsets(self.id) + + def delete(self): + """ + Request that this zone be deleted by Amazon. 
+ """ + self.route53connection.delete_hosted_zone(self.id) + + def get_nameservers(self): + """ Get the list of nameservers for this zone.""" + ns = self.find_records(self.name, 'NS') + if ns is not None: + ns = ns.resource_records + return ns diff --git a/ext/boto/s3/__init__.py b/ext/boto/s3/__init__.py new file mode 100644 index 0000000000..0898cd8d2e --- /dev/null +++ b/ext/boto/s3/__init__.py @@ -0,0 +1,75 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2014, Steven Richards +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +class S3RegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an connection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the connection + class's constructor as keyword arguments and they will be + passed along to the connection object. + + :rtype: Connection object + :return: The connection to this regions endpoint + """ + if self.connection_cls: + return self.connection_cls(host=self.endpoint, **kw_params) + + +def regions(): + """ + Get all available regions for the Amazon S3 service. 
+ + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.s3.connection import S3Connection + return get_regions( + 's3', + region_cls=S3RegionInfo, + connection_cls=S3Connection + ) + + +def connect_to_region(region_name, **kw_params): + from boto.s3.connection import S3Connection + if 'host' in kw_params: + host = kw_params.pop('host') + if host not in ['', None]: + region = S3RegionInfo( + name='custom', + endpoint=host, + connection_cls=S3Connection + ) + return region.connect(**kw_params) + + return connect('s3', region_name, region_cls=S3RegionInfo, + connection_cls=S3Connection, **kw_params) diff --git a/ext/boto/s3/acl.py b/ext/boto/s3/acl.py new file mode 100644 index 0000000000..9d73ddfec9 --- /dev/null +++ b/ext/boto/s3/acl.py @@ -0,0 +1,171 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
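For orientation, the two module-level helpers just added are the usual S3 entry points in boto. A minimal sketch, assuming configured AWS credentials; the region and bucket names are placeholders:

    import boto.s3

    # Enumerate the S3 regions boto knows about.
    for region in boto.s3.regions():
        print(region.name, region.endpoint)

    # Open a region-pinned connection and fetch a bucket by name.
    conn = boto.s3.connect_to_region('us-west-2')
    bucket = conn.get_bucket('my-example-bucket')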
+
+from boto.s3.user import User
+
+
+CannedACLStrings = ['private', 'public-read',
+                    'public-read-write', 'authenticated-read',
+                    'bucket-owner-read', 'bucket-owner-full-control',
+                    'log-delivery-write']
+
+
+class Policy(object):
+
+    def __init__(self, parent=None):
+        self.parent = parent
+        self.namespace = None
+        self.acl = None
+
+    def __repr__(self):
+        grants = []
+        for g in self.acl.grants:
+            if g.id == self.owner.id:
+                grants.append("%s (owner) = %s" % (g.display_name, g.permission))
+            else:
+                if g.type == 'CanonicalUser':
+                    u = g.display_name
+                elif g.type == 'Group':
+                    u = g.uri
+                else:
+                    u = g.email_address
+                grants.append("%s = %s" % (u, g.permission))
+        return "<Policy: %s>" % ", ".join(grants)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'AccessControlPolicy':
+            self.namespace = attrs.get('xmlns', None)
+            return None
+        if name == 'Owner':
+            self.owner = User(self)
+            return self.owner
+        elif name == 'AccessControlList':
+            self.acl = ACL(self)
+            return self.acl
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Owner':
+            pass
+        elif name == 'AccessControlList':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        if self.namespace is not None:
+            s = '<AccessControlPolicy xmlns="{0}">'.format(self.namespace)
+        else:
+            s = '<AccessControlPolicy>'
+        s += self.owner.to_xml()
+        s += self.acl.to_xml()
+        s += '</AccessControlPolicy>'
+        return s
+
+
+class ACL(object):
+
+    def __init__(self, policy=None):
+        self.policy = policy
+        self.grants = []
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def add_email_grant(self, permission, email_address):
+        grant = Grant(permission=permission, type='AmazonCustomerByEmail',
+                      email_address=email_address)
+        self.grants.append(grant)
+
+    def add_user_grant(self, permission, user_id, display_name=None):
+        grant = Grant(permission=permission, type='CanonicalUser', id=user_id,
+                      display_name=display_name)
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant(self))
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Grant':
+            pass
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<AccessControlList>'
+        for grant in self.grants:
+            s += grant.to_xml()
+        s += '</AccessControlList>'
+        return s
+
+
+class Grant(object):
+
+    NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+
+    def __init__(self, permission=None, type=None, id=None,
+                 display_name=None, uri=None, email_address=None):
+        self.permission = permission
+        self.id = id
+        self.display_name = display_name
+        self.uri = uri
+        self.email_address = email_address
+        self.type = type
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grantee':
+            self.type = attrs['xsi:type']
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'DisplayName':
+            self.display_name = value
+        elif name == 'URI':
+            self.uri = value
+        elif name == 'EmailAddress':
+            self.email_address = value
+        elif name == 'Grantee':
+            pass
+        elif name == 'Permission':
+            self.permission = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<Grant>'
+        s += '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)
+        if self.type == 'CanonicalUser':
+            s += '<ID>%s</ID>' % self.id
+            s += '<DisplayName>%s</DisplayName>' % self.display_name
+        elif self.type == 'Group':
+            s += '<URI>%s</URI>' % self.uri
+        else:
+            s += '<EmailAddress>%s</EmailAddress>' % self.email_address
+        s += '</Grantee>'
+        s += '<Permission>%s</Permission>' % self.permission
+        s += '</Grant>'
+        return s
diff --git a/ext/boto/s3/bucket.py b/ext/boto/s3/bucket.py
new file mode 100644
index 0000000000..88fb29ff99
--- /dev/null
+++ b/ext/boto/s3/bucket.py
@@ -0,0 +1,1878 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto import handler
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError
+from boto.s3.acl import Policy, CannedACLStrings, Grant
+from boto.s3.key import Key
+from boto.s3.prefix import Prefix
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.multipart import MultiPartUpload
+from boto.s3.multipart import CompleteMultiPartUpload
+from boto.s3.multidelete import MultiDeleteResult
+from boto.s3.multidelete import Error
+from boto.s3.bucketlistresultset import BucketListResultSet
+from boto.s3.bucketlistresultset import VersionedBucketListResultSet
+from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
+from boto.s3.lifecycle import Lifecycle
+from boto.s3.tagging import Tags
+from boto.s3.cors import CORSConfiguration
+from boto.s3.bucketlogging import BucketLogging
+from boto.s3 import website
+import boto.jsonresponse
+import boto.utils
+import xml.sax
+import xml.sax.saxutils
+import re
+import base64
+from collections import defaultdict
+from boto.compat import BytesIO, six, StringIO, urllib
+
+# as per http://goo.gl/BDuud (02/19/2011)
+
+
+class S3WebsiteEndpointTranslate(object):
+
+    trans_region = defaultdict(lambda: 's3-website-us-east-1')
+    trans_region['eu-west-1'] = 's3-website-eu-west-1'
+    trans_region['eu-central-1'] = 's3-website.eu-central-1'
+    trans_region['us-west-1'] = 's3-website-us-west-1'
+    trans_region['us-west-2'] = 's3-website-us-west-2'
+    trans_region['sa-east-1'] = 's3-website-sa-east-1'
+    trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1'
+    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'
+    trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2'
+    trans_region['cn-north-1'] = 's3-website.cn-north-1'
+
+    @classmethod
+    def translate_region(self, reg):
+        return self.trans_region[reg]
+
+S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
+
+
+class Bucket(object):
+
+    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
+
+    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Payer>%s</Payer>
+       </RequestPaymentConfiguration>"""
+
+    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
+       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+         <Status>%s</Status>
+         <MfaDelete>%s</MfaDelete>
+       </VersioningConfiguration>"""
+
+    VersionRE = '<Status>([A-Za-z]+)</Status>'
+    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
+
+    def __init__(self, connection=None, name=None, key_class=Key):
+        self.name = name
+        self.connection = connection
+        self.key_class = key_class
+
+    def __repr__(self):
+        return '<Bucket: %s>' % self.name
+
+    def __iter__(self):
+        return iter(BucketListResultSet(self))
+
+    def __contains__(self, key_name):
+        return not (self.get_key(key_name) is None)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'CreationDate':
+            self.creation_date = value
+        else:
+            setattr(self, name, value)
+
+    def set_key_class(self, key_class):
+        """
+        Set the Key class associated with this bucket. By default, this
+        would be the boto.s3.key.Key class but if you want to subclass that
+        for some reason this allows you to associate your new class with a
+        bucket so that when you call bucket.new_key() or when you get a listing
+        of keys in the bucket you will get instances of your key class
+        rather than the default.
+
+        :type key_class: class
+        :param key_class: A subclass of Key that can be more specific
+        """
+        self.key_class = key_class
+
+    def lookup(self, key_name, headers=None):
+        """
+        Deprecated: Please use get_key method.
+
+        :type key_name: string
+        :param key_name: The name of the key to retrieve
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        return self.get_key(key_name, headers=headers)
+
+    def get_key(self, key_name, headers=None, version_id=None,
+                response_headers=None, validate=True):
+        """
+        Check to see if a particular key exists within the bucket.  This
+        method uses a HEAD request to check for the existence of the key.
+        Returns: An instance of a Key object or None
+
+        :param key_name: The name of the key to retrieve
+        :type key_name: string
+
+        :param headers: The headers to send when retrieving the key
+        :type headers: dict
+
+        :param version_id:
+        :type version_id: string
+
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response. See
+            http://goo.gl/EWOPb for details.
+        :type response_headers: dict
+
+        :param validate: Verifies whether the key exists. If ``False``, this
+            will not hit the service, constructing an in-memory object.
+            Default is ``True``.
+        :type validate: bool
+
+        :rtype: :class:`boto.s3.key.Key`
+        :returns: A Key object from this bucket.
+        """
+        if validate is False:
+            if headers or version_id or response_headers:
+                raise BotoClientError(
+                    "When providing 'validate=False', no other params " + \
+                    "are allowed."
+                )
+
+            # This leans on the default behavior of ``new_key`` (not hitting
+            # the service). If that changes, that behavior should migrate here.
+ return self.new_key(key_name) + + query_args_l = [] + if version_id: + query_args_l.append('versionId=%s' % version_id) + if response_headers: + for rk, rv in six.iteritems(response_headers): + query_args_l.append('%s=%s' % (rk, urllib.parse.quote(rv))) + + key, resp = self._get_key_internal(key_name, headers, query_args_l) + return key + + def _get_key_internal(self, key_name, headers, query_args_l): + query_args = '&'.join(query_args_l) or None + response = self.connection.make_request('HEAD', self.name, key_name, + headers=headers, + query_args=query_args) + response.read() + # Allow any success status (2xx) - for example this lets us + # support Range gets, which return status 206: + if response.status / 100 == 2: + k = self.key_class(self) + provider = self.connection.provider + k.metadata = boto.utils.get_aws_metadata(response.msg, provider) + for field in Key.base_fields: + k.__dict__[field.lower().replace('-', '_')] = \ + response.getheader(field) + # the following machinations are a workaround to the fact that + # apache/fastcgi omits the content-length header on HEAD + # requests when the content-length is zero. + # See http://goo.gl/0Tdax for more details. + clen = response.getheader('content-length') + if clen: + k.size = int(response.getheader('content-length')) + else: + k.size = 0 + k.name = key_name + k.handle_version_headers(response) + k.handle_encryption_headers(response) + k.handle_restore_headers(response) + k.handle_storage_class_header(response) + k.handle_addl_headers(response.getheaders()) + return k, response + else: + if response.status == 404: + return None, response + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, '') + + def list(self, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): + """ + List key objects within a bucket. This returns an instance of an + BucketListResultSet that automatically handles all of the result + paging, etc. from S3. You just need to keep iterating until + there are no more results. + + Called with no arguments, this will return an iterator object across + all keys within the bucket. + + The Key objects returned by the iterator are obtained by parsing + the results of a GET on the bucket, also known as the List Objects + request. The XML returned by this request contains only a subset + of the information about each key. Certain metadata fields such + as Content-Type and user metadata are not available in the XML. + Therefore, if you want these additional metadata fields you will + have to do a HEAD request on the Key in the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See http://goo.gl/Xx63h for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. 
For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return BucketListResultSet(self, prefix, delimiter, marker, headers, + encoding_type=encoding_type) + + def list_versions(self, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None, encoding_type=None): + """ + List version objects within a bucket. This returns an + instance of an VersionedBucketListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. Called + with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle through + the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + + http://aws.amazon.com/releasenotes/Amazon-S3/213 + + for more details. + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return VersionedBucketListResultSet(self, prefix, delimiter, + key_marker, version_id_marker, + headers, + encoding_type=encoding_type) + + def list_multipart_uploads(self, key_marker='', + upload_id_marker='', + headers=None, encoding_type=None): + """ + List multipart upload objects within a bucket. This returns an + instance of an MultiPartUploadListResultSet that automatically + handles all of the result paging, etc. from S3. You just need + to keep iterating until there are no more results. + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + + :type upload_id_marker: string + :param upload_id_marker: The upload identifier + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + + :rtype: :class:`boto.s3.bucketlistresultset.MultiPartUploadListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return MultiPartUploadListResultSet(self, key_marker, + upload_id_marker, + headers, + encoding_type=encoding_type) + + def _get_all_query_args(self, params, initial_query_string=''): + pairs = [] + + if initial_query_string: + pairs.append(initial_query_string) + + for key, value in sorted(params.items(), key=lambda x: x[0]): + if value is None: + continue + key = key.replace('_', '-') + if key == 'maxkeys': + key = 'max-keys' + if not isinstance(value, six.string_types + (six.binary_type,)): + value = six.text_type(value) + if not isinstance(value, six.binary_type): + value = value.encode('utf-8') + if value: + pairs.append(u'%s=%s' % ( + urllib.parse.quote(key), + urllib.parse.quote(value) + )) + + return '&'.join(pairs) + + def _get_all(self, element_map, initial_query_string='', + headers=None, **params): + query_args = self._get_all_query_args( + params, + initial_query_string=initial_query_string + ) + response = self.connection.make_request('GET', self.name, + headers=headers, + query_args=query_args) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet(element_map) + h = handler.XmlHandler(rs, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def validate_kwarg_names(self, kwargs, names): + """ + Checks that all named arguments are in the specified list of names. + + :type kwargs: dict + :param kwargs: Dictionary of kwargs to validate. + + :type names: list + :param names: List of possible named arguments. + """ + for kwarg in kwargs: + if kwarg not in names: + raise TypeError('Invalid argument "%s"!' % kwarg) + + def get_all_keys(self, headers=None, **params): + """ + A lower-level method for listing contents of a bucket. This + closely models the actual S3 API and requires you to manually + handle the paging of results. For a higher-level method that + handles the details of paging for you, you can use the list + method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. 
+ + Valid options: ``url`` + :type encoding_type: string + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + + """ + self.validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', + 'marker', 'delimiter', + 'encoding_type']) + return self._get_all([('Contents', self.key_class), + ('CommonPrefixes', Prefix)], + '', headers, **params) + + def get_all_versions(self, headers=None, **params): + """ + A lower-level, version-aware method for listing contents of a + bucket. This closely models the actual S3 API and requires + you to manually handle the paging of results. For a + higher-level method that handles the details of paging for + you, you can use the list method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + with respect to keys. + + :type version_id_marker: string + :param version_id_marker: The "marker" of where you are in the result + set with respect to version-id's. + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that contain the + same string between the prefix and the first occurrence of + the delimiter will be rolled up into a single result + element in the CommonPrefixes collection. These rolled-up + keys are not returned elsewhere in the response. + + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + :type encoding_type: string + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + """ + self.validate_get_all_versions_params(params) + return self._get_all([('Version', self.key_class), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker)], + 'versions', headers, **params) + + def validate_get_all_versions_params(self, params): + """ + Validate that the parameters passed to get_all_versions are valid. + Overridden by subclasses that allow a different set of parameters. + + :type params: dict + :param params: Parameters to validate. + """ + self.validate_kwarg_names( + params, ['maxkeys', 'max_keys', 'prefix', 'key_marker', + 'version_id_marker', 'delimiter', 'encoding_type']) + + def get_all_multipart_uploads(self, headers=None, **params): + """ + A lower-level, version-aware method for listing active + MultiPart uploads for a bucket. This closely models the + actual S3 API and requires you to manually handle the paging + of results. For a higher-level method that handles the + details of paging for you, you can use the list method. + + :type max_uploads: int + :param max_uploads: The maximum number of uploads to retrieve. + Default value is 1000. + + :type key_marker: string + :param key_marker: Together with upload_id_marker, this + parameter specifies the multipart upload after which + listing should begin. If upload_id_marker is not + specified, only the keys lexicographically greater than + the specified key_marker will be included in the list. 
+ + If upload_id_marker is specified, any multipart uploads + for a key equal to the key_marker might also be included, + provided those multipart uploads have upload IDs + lexicographically greater than the specified + upload_id_marker. + + :type upload_id_marker: string + :param upload_id_marker: Together with key-marker, specifies + the multipart upload after which listing should begin. If + key_marker is not specified, the upload_id_marker + parameter is ignored. Otherwise, any multipart uploads + for a key equal to the key_marker might be included in the + list only if they have an upload ID lexicographically + greater than the specified upload_id_marker. + + :type encoding_type: string + :param encoding_type: Requests Amazon S3 to encode the response and + specifies the encoding method to use. + + An object key can contain any Unicode character; however, XML 1.0 + parser cannot parse some characters, such as characters with an + ASCII value from 0 to 10. For characters that are not supported in + XML 1.0, you can add this parameter to request that Amazon S3 + encode the keys in the response. + + Valid options: ``url`` + + :type delimiter: string + :param delimiter: Character you use to group keys. + All keys that contain the same string between the prefix, if + specified, and the first occurrence of the delimiter after the + prefix are grouped under a single result element, CommonPrefixes. + If you don't specify the prefix parameter, then the substring + starts at the beginning of the key. The keys that are grouped + under CommonPrefixes result element are not returned elsewhere + in the response. + + :type prefix: string + :param prefix: Lists in-progress uploads only for those keys that + begin with the specified prefix. You can use prefixes to separate + a bucket into different grouping of keys. (You can think of using + prefix to make groups in the same way you'd use a folder in a + file system.) + + :rtype: ResultSet + :return: The result from S3 listing the uploads requested + + """ + self.validate_kwarg_names(params, ['max_uploads', 'key_marker', + 'upload_id_marker', 'encoding_type', + 'delimiter', 'prefix']) + return self._get_all([('Upload', MultiPartUpload), + ('CommonPrefixes', Prefix)], + 'uploads', headers, **params) + + def new_key(self, key_name=None): + """ + Creates a new key + + :type key_name: string + :param key_name: The name of the key to create + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + if not key_name: + raise ValueError('Empty key names are not allowed') + return self.key_class(self, key_name) + + def generate_url(self, expires_in, method='GET', headers=None, + force_http=False, response_headers=None, + expires_in_absolute=False): + return self.connection.generate_url(expires_in, method, self.name, + headers=headers, + force_http=force_http, + response_headers=response_headers, + expires_in_absolute=expires_in_absolute) + + def delete_keys(self, keys, quiet=False, mfa_token=None, headers=None): + """ + Deletes a set of keys using S3's Multi-object delete API. If a + VersionID is specified for that key then that version is removed. + Returns a MultiDeleteResult Object, which contains Deleted + and Error elements for each key you ask to delete. + + :type keys: list + :param keys: A list of either key_names or (key_name, versionid) pairs + or a list of Key instances. 
+
+        :type quiet: boolean
+        :param quiet: In quiet mode the response includes only keys
+            where the delete operation encountered an error. For a
+            successful deletion, the operation does not return any
+            information about the delete in the response body.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device. This value is
+            required anytime you are deleting versioned objects from a
+            bucket that has the MFADelete option on the bucket.
+
+        :returns: An instance of MultiDeleteResult
+        """
+        ikeys = iter(keys)
+        result = MultiDeleteResult(self)
+        provider = self.connection.provider
+        query_args = 'delete'
+
+        def delete_keys2(hdrs):
+            hdrs = hdrs or {}
+            data = u"""<?xml version="1.0" encoding="UTF-8"?>"""
+            data += u"<Delete>"
+            if quiet:
+                data += u"<Quiet>true</Quiet>"
+            count = 0
+            while count < 1000:
+                try:
+                    key = next(ikeys)
+                except StopIteration:
+                    break
+                if isinstance(key, six.string_types):
+                    key_name = key
+                    version_id = None
+                elif isinstance(key, tuple) and len(key) == 2:
+                    key_name, version_id = key
+                elif (isinstance(key, Key) or isinstance(key, DeleteMarker)) and key.name:
+                    key_name = key.name
+                    version_id = key.version_id
+                else:
+                    if isinstance(key, Prefix):
+                        key_name = key.name
+                        code = 'PrefixSkipped'   # Don't delete Prefix
+                    else:
+                        key_name = repr(key)   # try get a string
+                        code = 'InvalidArgument'   # other unknown type
+                    message = 'Invalid. No delete action taken for this object.'
+                    error = Error(key_name, code=code, message=message)
+                    result.errors.append(error)
+                    continue
+                count += 1
+                data += u"<Object><Key>%s</Key>" % xml.sax.saxutils.escape(key_name)
+                if version_id:
+                    data += u"<VersionId>%s</VersionId>" % version_id
+                data += u"</Object>"
+            data += u"</Delete>"
+            if count <= 0:
+                return False  # no more
+            data = data.encode('utf-8')
+            fp = BytesIO(data)
+            md5 = boto.utils.compute_md5(fp)
+            hdrs['Content-MD5'] = md5[1]
+            hdrs['Content-Type'] = 'text/xml'
+            if mfa_token:
+                hdrs[provider.mfa_header] = ' '.join(mfa_token)
+            response = self.connection.make_request('POST', self.name,
+                                                    headers=hdrs,
+                                                    query_args=query_args,
+                                                    data=data)
+            body = response.read()
+            if response.status == 200:
+                h = handler.XmlHandler(result, self)
+                if not isinstance(body, bytes):
+                    body = body.encode('utf-8')
+                xml.sax.parseString(body, h)
+                return count >= 1000  # more?
+            else:
+                raise provider.storage_response_error(response.status,
+                                                      response.reason,
+                                                      body)
+        while delete_keys2(headers):
+            pass
+        return result
+
+    def delete_key(self, key_name, headers=None, version_id=None,
+                   mfa_token=None):
+        """
+        Deletes a key from the bucket. If a version_id is provided,
+        only that version of the key will be deleted.
+
+        :type key_name: string
+        :param key_name: The key name to delete
+
+        :type version_id: string
+        :param version_id: The version ID (optional)
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device. This value is
+            required anytime you are deleting versioned objects from a
+            bucket that has the MFADelete option on the bucket.
+
+        :rtype: :class:`boto.s3.key.Key` or subclass
+        :returns: A key object holding information on what was
+            deleted. The Caller can see if a delete_marker was
+            created or removed and what version_id the delete created
+            or removed.
+ """ + if not key_name: + raise ValueError('Empty key names are not allowed') + return self._delete_key_internal(key_name, headers=headers, + version_id=version_id, + mfa_token=mfa_token, + query_args_l=None) + + def _delete_key_internal(self, key_name, headers=None, version_id=None, + mfa_token=None, query_args_l=None): + query_args_l = query_args_l or [] + provider = self.connection.provider + if version_id: + query_args_l.append('versionId=%s' % version_id) + query_args = '&'.join(query_args_l) or None + if mfa_token: + if not headers: + headers = {} + headers[provider.mfa_header] = ' '.join(mfa_token) + response = self.connection.make_request('DELETE', self.name, key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status != 204: + raise provider.storage_response_error(response.status, + response.reason, body) + else: + # return a key object with information on what was deleted. + k = self.key_class(self) + k.name = key_name + k.handle_version_headers(response) + k.handle_addl_headers(response.getheaders()) + return k + + def copy_key(self, new_key_name, src_bucket_name, + src_key_name, metadata=None, src_version_id=None, + storage_class='STANDARD', preserve_acl=False, + encrypt_key=False, headers=None, query_args=None): + """ + Create a new key in the bucket by copying another existing key. + + :type new_key_name: string + :param new_key_name: The name of the new key + + :type src_bucket_name: string + :param src_bucket_name: The name of the source bucket + + :type src_key_name: string + :param src_key_name: The name of the source key + + :type src_version_id: string + :param src_version_id: The version id for the key. This param + is optional. If not specified, the newest version of the + key will be copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type storage_class: string + :param storage_class: The storage class of the new key. By + default, the new key will use the standard storage class. + Possible values are: STANDARD | REDUCED_REDUNDANCY + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. Note that + preserving the ACL in the new key object will require two + additional API calls to S3, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL, a value of False will be + significantly more efficient. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type headers: dict + :param headers: A dictionary of header name/value pairs. 
+ + :type query_args: string + :param query_args: A string of additional querystring arguments + to append to the request + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + headers = headers or {} + provider = self.connection.provider + src_key_name = boto.utils.get_utf8_value(src_key_name) + if preserve_acl: + if self.name == src_bucket_name: + src_bucket = self + else: + src_bucket = self.connection.get_bucket( + src_bucket_name, validate=False) + acl = src_bucket.get_xml_acl(src_key_name) + if encrypt_key: + headers[provider.server_side_encryption_header] = 'AES256' + src = '%s/%s' % (src_bucket_name, urllib.parse.quote(src_key_name)) + if src_version_id: + src += '?versionId=%s' % src_version_id + headers[provider.copy_source_header] = str(src) + # make sure storage_class_header key exists before accessing it + if provider.storage_class_header and storage_class: + headers[provider.storage_class_header] = storage_class + if metadata is not None: + headers[provider.metadata_directive_header] = 'REPLACE' + headers = boto.utils.merge_meta(headers, metadata, provider) + elif not query_args: # Can't use this header with multi-part copy. + headers[provider.metadata_directive_header] = 'COPY' + response = self.connection.make_request('PUT', self.name, new_key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status == 200: + key = self.new_key(new_key_name) + h = handler.XmlHandler(key, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + if hasattr(key, 'Error'): + raise provider.storage_copy_error(key.Code, key.Message, body) + key.handle_version_headers(response) + key.handle_addl_headers(response.getheaders()) + if preserve_acl: + self.set_xml_acl(acl, new_key_name) + return key + else: + raise provider.storage_response_error(response.status, + response.reason, body) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None): + assert acl_str in CannedACLStrings + + if headers: + headers[self.connection.provider.acl_header] = acl_str + else: + headers = {self.connection.provider.acl_header: acl_str} + + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_xml_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None, + query_args='acl'): + if version_id: + query_args += '&versionId=%s' % version_id + if not isinstance(acl_str, bytes): + acl_str = acl_str.encode('utf-8') + response = self.connection.make_request('PUT', self.name, key_name, + data=acl_str, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_acl(self, acl_or_str, 
key_name='', headers=None, version_id=None): + if isinstance(acl_or_str, Policy): + self.set_xml_acl(acl_or_str.to_xml(), key_name, + headers, version_id) + else: + self.set_canned_acl(acl_or_str, key_name, + headers, version_id) + + def get_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status == 200: + policy = Policy(self) + h = handler.XmlHandler(policy, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return policy + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_subresource(self, subresource, value, key_name='', headers=None, + version_id=None): + """ + Set a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to set. + + :type value: string + :param value: The value of the subresource. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. The version id of the key to + operate on. If not specified, operate on the newest + version. + """ + if not subresource: + raise TypeError('set_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + if not isinstance(value, bytes): + value = value.encode('utf-8') + response = self.connection.make_request('PUT', self.name, key_name, + data=value, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_subresource(self, subresource, key_name='', headers=None, + version_id=None): + """ + Get a subresource for a bucket or key. + + :type subresource: string + :param subresource: The subresource to get. + + :type key_name: string + :param key_name: The key to operate on, or None to operate on the + bucket. + + :type headers: dict + :param headers: Additional HTTP headers to include in the request. + + :type src_version_id: string + :param src_version_id: Optional. The version id of the key to + operate on. If not specified, operate on the newest + version. + + :rtype: string + :returns: The value of the subresource. + """ + if not subresource: + raise TypeError('get_subresource called with subresource=None') + query_args = subresource + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def make_public(self, recursive=False, headers=None): + self.set_canned_acl('public-read', headers=headers) + if recursive: + for key in self: + self.set_canned_acl('public-read', key.name, headers=headers) + + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. 
This method retrieves the current ACL, creates a new
+        grant based on the parameters passed in, adds that grant to the ACL
+        and then PUT's the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type email_address: string
+        :param email_address: The email address associated with the AWS
+            account you are granting the permission to.
+
+        :type recursive: boolean
+        :param recursive: A boolean value that controls whether the
+            command will apply the grant to all keys within the bucket
+            or not.  The default value is False.  By passing a True
+            value, the call will iterate through all keys in the
+            bucket and apply the same grant to each key.  CAUTION: If
+            you have a lot of keys, this could take a long time!
+        """
+        if permission not in S3Permissions:
+            raise self.connection.provider.storage_permissions_error(
+                'Unknown Permission: %s' % permission)
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_email_grant(permission, email_address)
+        self.set_acl(policy, headers=headers)
+        if recursive:
+            for key in self:
+                key.add_email_grant(permission, email_address, headers=headers)
+
+    def add_user_grant(self, permission, user_id, recursive=False,
+                       headers=None, display_name=None):
+        """
+        Convenience method that provides a quick way to add a canonical
+        user grant to a bucket.  This method retrieves the current ACL,
+        creates a new grant based on the parameters passed in, adds that
+        grant to the ACL and then PUT's the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type user_id: string
+        :param user_id: The canonical user id associated with the AWS
+            account you are granting the permission to.
+
+        :type recursive: boolean
+        :param recursive: A boolean value that controls whether the
+            command will apply the grant to all keys within the bucket
+            or not.  The default value is False.  By passing a True
+            value, the call will iterate through all keys in the
+            bucket and apply the same grant to each key.  CAUTION: If
+            you have a lot of keys, this could take a long time!
+
+        :type display_name: string
+        :param display_name: An optional string containing the user's
+            Display Name.  Only required on Walrus.
+        """
+        if permission not in S3Permissions:
+            raise self.connection.provider.storage_permissions_error(
+                'Unknown Permission: %s' % permission)
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_user_grant(permission, user_id,
+                                  display_name=display_name)
+        self.set_acl(policy, headers=headers)
+        if recursive:
+            for key in self:
+                key.add_user_grant(permission, user_id, headers=headers,
+                                   display_name=display_name)
+
+    def list_grants(self, headers=None):
+        policy = self.get_acl(headers=headers)
+        return policy.acl.grants
+
+    def get_location(self):
+        """
+        Returns the LocationConstraint for the bucket.
+
+        :rtype: str
+        :return: The LocationConstraint for the bucket or the empty
+            string if no constraint was specified when the bucket was created.
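+
+        Example (a minimal sketch; ``conn`` is assumed to be an existing
+        :class:`S3Connection` and ``mybucket`` an existing bucket)::
+
+            bucket = conn.get_bucket('mybucket')
+            print(bucket.get_location())  # e.g. 'eu-west-1', or '' for US Classic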
+ """ + response = self.connection.make_request('GET', self.name, + query_args='location') + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return rs.LocationConstraint + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_logging(self, logging_str, headers=None): + """ + Set logging on a bucket directly to the given xml string. + + :type logging_str: unicode string + :param logging_str: The XML for the bucketloggingstatus which + will be set. The string will be converted to utf-8 before + it is sent. Usually, you will obtain this XML from the + BucketLogging object. + + :rtype: bool + :return: True if ok or raises an exception. + """ + body = logging_str + if not isinstance(body, bytes): + body = body.encode('utf-8') + response = self.connection.make_request('PUT', self.name, data=body, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def enable_logging(self, target_bucket, target_prefix='', + grants=None, headers=None): + """ + Enable logging on a bucket. + + :type target_bucket: bucket or string + :param target_bucket: The bucket to log to. + + :type target_prefix: string + :param target_prefix: The prefix which should be prepended to the + generated log files written to the target_bucket. + + :type grants: list of Grant objects + :param grants: A list of extra permissions which will be granted on + the log files which are created. + + :rtype: bool + :return: True if ok or raises an exception. + """ + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + blogging = BucketLogging(target=target_bucket, prefix=target_prefix, + grants=grants) + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def disable_logging(self, headers=None): + """ + Disable logging on a bucket. + + :rtype: bool + :return: True if ok or raises an exception. + """ + blogging = BucketLogging() + return self.set_xml_logging(blogging.to_xml(), headers=headers) + + def get_logging_status(self, headers=None): + """ + Get the logging status for this bucket. + + :rtype: :class:`boto.s3.bucketlogging.BucketLogging` + :return: A BucketLogging object for this bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + blogging = BucketLogging() + h = handler.XmlHandler(blogging, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return blogging + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_as_logging_target(self, headers=None): + """ + Setup the current bucket as a logging target by granting the necessary + permissions to the LogDelivery group to write log files to this bucket. 
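+
+        Example (a minimal sketch; ``conn`` and the ``logs`` and ``mybucket``
+        buckets are assumed, pre-existing names)::
+
+            logs = conn.get_bucket('logs')
+            logs.set_as_logging_target()
+            bucket = conn.get_bucket('mybucket')
+            bucket.enable_logging(logs, target_prefix='mybucket/')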
+        """
+        policy = self.get_acl(headers=headers)
+        g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
+        g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
+        policy.acl.add_grant(g1)
+        policy.acl.add_grant(g2)
+        self.set_acl(policy, headers=headers)
+
+    def get_request_payment(self, headers=None):
+        response = self.connection.make_request('GET', self.name,
+            query_args='requestPayment', headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return body
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def set_request_payment(self, payer='BucketOwner', headers=None):
+        body = self.BucketPaymentBody % payer
+        response = self.connection.make_request('PUT', self.name, data=body,
+            query_args='requestPayment', headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def configure_versioning(self, versioning, mfa_delete=False,
+                             mfa_token=None, headers=None):
+        """
+        Configure versioning for this bucket.
+
+        .. note:: This feature is currently in beta.
+
+        :type versioning: bool
+        :param versioning: A boolean indicating whether versioning is
+            enabled (True) or disabled (False).
+
+        :type mfa_delete: bool
+        :param mfa_delete: A boolean indicating whether the
+            Multi-Factor Authentication Delete feature is enabled
+            (True) or disabled (False).  If mfa_delete is enabled then
+            all Delete operations will require the token from your MFA
+            device to be passed in the request.
+
+        :type mfa_token: tuple or list of strings
+        :param mfa_token: A tuple or list consisting of the serial
+            number from the MFA device and the current value of the
+            six-digit token associated with the device.  This value is
+            required when you are changing the status of the MfaDelete
+            property of the bucket.
+        """
+        if versioning:
+            ver = 'Enabled'
+        else:
+            ver = 'Suspended'
+        if mfa_delete:
+            mfa = 'Enabled'
+        else:
+            mfa = 'Disabled'
+        body = self.VersioningBody % (ver, mfa)
+        if mfa_token:
+            if not headers:
+                headers = {}
+            provider = self.connection.provider
+            headers[provider.mfa_header] = ' '.join(mfa_token)
+        response = self.connection.make_request('PUT', self.name, data=body,
+            query_args='versioning', headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def get_versioning_status(self, headers=None):
+        """
+        Returns the current status of versioning on the bucket.
+
+        :rtype: dict
+        :returns: A dictionary containing a key named 'Versioning'
+            that can have a value of either Enabled, Disabled, or
+            Suspended. Also, if MFADelete has ever been enabled on the
+            bucket, the dictionary will contain a key named
+            'MfaDelete' which will have a value of either Enabled or
+            Suspended.
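+
+        Example (a minimal sketch, assuming ``bucket`` is an existing
+        :class:`Bucket`)::
+
+            bucket.configure_versioning(True)
+            status = bucket.get_versioning_status()
+            print(status.get('Versioning'))  # 'Enabled'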
+ """ + response = self.connection.make_request('GET', self.name, + query_args='versioning', headers=headers) + body = response.read() + if not isinstance(body, six.string_types): + body = body.decode('utf-8') + boto.log.debug(body) + if response.status == 200: + d = {} + ver = re.search(self.VersionRE, body) + if ver: + d['Versioning'] = ver.group(1) + mfa = re.search(self.MFADeleteRE, body) + if mfa: + d['MfaDelete'] = mfa.group(1) + return d + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_lifecycle(self, lifecycle_config, headers=None): + """ + Configure lifecycle for this bucket. + + :type lifecycle_config: :class:`boto.s3.lifecycle.Lifecycle` + :param lifecycle_config: The lifecycle configuration you want + to configure for this bucket. + """ + xml = lifecycle_config.to_xml() + #xml = xml.encode('utf-8') + fp = StringIO(xml) + md5 = boto.utils.compute_md5(fp) + if headers is None: + headers = {} + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + response = self.connection.make_request('PUT', self.name, + data=fp.getvalue(), + query_args='lifecycle', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_lifecycle_config(self, headers=None): + """ + Returns the current lifecycle configuration on the bucket. + + :rtype: :class:`boto.s3.lifecycle.Lifecycle` + :returns: A LifecycleConfig object that describes all current + lifecycle rules in effect for the bucket. + """ + response = self.connection.make_request('GET', self.name, + query_args='lifecycle', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + lifecycle = Lifecycle() + h = handler.XmlHandler(lifecycle, self) + if not isinstance(body, bytes): + body = body.encode('utf-8') + xml.sax.parseString(body, h) + return lifecycle + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete_lifecycle_configuration(self, headers=None): + """ + Removes all lifecycle configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='lifecycle', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def configure_website(self, suffix=None, error_key=None, + redirect_all_requests_to=None, + routing_rules=None, + headers=None): + """ + Configure this bucket to act as a website + + :type suffix: str + :param suffix: Suffix that is appended to a request that is for a + "directory" on the website endpoint (e.g. if the suffix is + index.html and you make a request to samplebucket/images/ + the data that is returned will be for the object with the + key name images/index.html). The suffix must not be empty + and must not include a slash character. + + :type error_key: str + :param error_key: The object key name to use when a 4XX class + error occurs. This is optional. + + :type redirect_all_requests_to: :class:`boto.s3.website.RedirectLocation` + :param redirect_all_requests_to: Describes the redirect behavior for + every request to this bucket's website endpoint. If this value is + non None, no other values are considered when configuring the + website configuration for the bucket. 
This is an instance of + ``RedirectLocation``. + + :type routing_rules: :class:`boto.s3.website.RoutingRules` + :param routing_rules: Object which specifies conditions + and redirects that apply when the conditions are met. + + """ + config = website.WebsiteConfiguration( + suffix, error_key, redirect_all_requests_to, + routing_rules) + return self.set_website_configuration(config, headers=headers) + + def set_website_configuration(self, config, headers=None): + """ + :type config: boto.s3.website.WebsiteConfiguration + :param config: Configuration data + """ + return self.set_website_configuration_xml(config.to_xml(), + headers=headers) + + + def set_website_configuration_xml(self, xml, headers=None): + """Upload xml website configuration""" + response = self.connection.make_request('PUT', self.name, data=xml, + query_args='website', + headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_website_configuration(self, headers=None): + """ + Returns the current status of website configuration on the bucket. + + :rtype: dict + :returns: A dictionary containing a Python representation + of the XML response from S3. The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that + is for a "directory" on the website endpoint + * ErrorDocument + + * Key : name of object to serve when an error occurs + + """ + return self.get_website_configuration_with_xml(headers)[0] + + def get_website_configuration_obj(self, headers=None): + """Get the website configuration as a + :class:`boto.s3.website.WebsiteConfiguration` object. + """ + config_xml = self.get_website_configuration_xml(headers=headers) + config = website.WebsiteConfiguration() + h = handler.XmlHandler(config, self) + xml.sax.parseString(config_xml, h) + return config + + def get_website_configuration_with_xml(self, headers=None): + """ + Returns the current status of website configuration on the bucket as + unparsed XML. + + :rtype: 2-Tuple + :returns: 2-tuple containing: + + 1) A dictionary containing a Python representation \ + of the XML response. The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that \ + is for a "directory" on the website endpoint + + * ErrorDocument + + * Key : name of object to serve when an error occurs + + + 2) unparsed XML describing the bucket's website configuration + + """ + + body = self.get_website_configuration_xml(headers=headers) + e = boto.jsonresponse.Element() + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e, body + + def get_website_configuration_xml(self, headers=None): + """Get raw website configuration xml""" + response = self.connection.make_request('GET', self.name, + query_args='website', headers=headers) + body = response.read().decode('utf-8') + boto.log.debug(body) + + if response.status != 200: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return body + + def delete_website_configuration(self, headers=None): + """ + Removes all website configuration from the bucket. 
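+
+        Example of the full website-configuration lifecycle (a sketch;
+        assumes an existing ``bucket``)::
+
+            bucket.configure_website(suffix='index.html',
+                                     error_key='error.html')
+            config = bucket.get_website_configuration()
+            bucket.delete_website_configuration()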
+        """
+        response = self.connection.make_request('DELETE', self.name,
+            query_args='website', headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 204:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def get_website_endpoint(self):
+        """
+        Returns the fully qualified hostname to use if you want to access this
+        bucket as a website.  This doesn't validate whether the bucket has
+        been correctly configured as a website or not.
+        """
+        l = [self.name]
+        l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
+        l.append('.'.join(self.connection.host.split('.')[-2:]))
+        return '.'.join(l)
+
+    def get_policy(self, headers=None):
+        """
+        Returns the JSON policy associated with the bucket.  The policy
+        is returned as an uninterpreted JSON string.
+        """
+        response = self.connection.make_request('GET', self.name,
+            query_args='policy', headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return body
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def set_policy(self, policy, headers=None):
+        """
+        Add or replace the JSON policy associated with the bucket.
+
+        :type policy: str
+        :param policy: The JSON policy as a string.
+        """
+        response = self.connection.make_request('PUT', self.name,
+                                                data=policy,
+                                                query_args='policy',
+                                                headers=headers)
+        body = response.read()
+        if response.status >= 200 and response.status <= 204:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def delete_policy(self, headers=None):
+        response = self.connection.make_request('DELETE', self.name,
+                                                data='/?policy',
+                                                query_args='policy',
+                                                headers=headers)
+        body = response.read()
+        if response.status >= 200 and response.status <= 204:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def set_cors_xml(self, cors_xml, headers=None):
+        """
+        Set the CORS (Cross-Origin Resource Sharing) for a bucket.
+
+        :type cors_xml: str
+        :param cors_xml: The XML document describing your desired
+            CORS configuration.  See the S3 documentation for details
+            of the exact syntax required.
+        """
+        fp = StringIO(cors_xml)
+        md5 = boto.utils.compute_md5(fp)
+        if headers is None:
+            headers = {}
+        headers['Content-MD5'] = md5[1]
+        headers['Content-Type'] = 'text/xml'
+        response = self.connection.make_request('PUT', self.name,
+                                                data=fp.getvalue(),
+                                                query_args='cors',
+                                                headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return True
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def set_cors(self, cors_config, headers=None):
+        """
+        Set the CORS for this bucket given a boto CORSConfiguration
+        object.
+
+        :type cors_config: :class:`boto.s3.cors.CORSConfiguration`
+        :param cors_config: The CORS configuration you want
+            to configure for this bucket.
+        """
+        return self.set_cors_xml(cors_config.to_xml())
+
+    def get_cors_xml(self, headers=None):
+        """
+        Returns the current CORS configuration on the bucket as an
+        XML document.
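+
+        Example (a sketch; assumes an existing ``bucket``)::
+
+            from boto.s3.cors import CORSConfiguration
+            config = CORSConfiguration()
+            config.add_rule(['GET'], ['*'], allowed_header=['*'],
+                            max_age_seconds=3000)
+            bucket.set_cors(config)
+            print(bucket.get_cors_xml())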
+ """ + response = self.connection.make_request('GET', self.name, + query_args='cors', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def get_cors(self, headers=None): + """ + Returns the current CORS configuration on the bucket. + + :rtype: :class:`boto.s3.cors.CORSConfiguration` + :returns: A CORSConfiguration object that describes all current + CORS rules in effect for the bucket. + """ + body = self.get_cors_xml(headers) + cors = CORSConfiguration() + h = handler.XmlHandler(cors, self) + xml.sax.parseString(body, h) + return cors + + def delete_cors(self, headers=None): + """ + Removes all CORS configuration from the bucket. + """ + response = self.connection.make_request('DELETE', self.name, + query_args='cors', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def initiate_multipart_upload(self, key_name, headers=None, + reduced_redundancy=False, + metadata=None, encrypt_key=False, + policy=None): + """ + Start a multipart upload operation. + + .. note:: + + Note: After you initiate multipart upload and upload one or more + parts, you must either complete or abort multipart upload in order + to stop getting charged for storage of the uploaded parts. Only + after you either complete or abort multipart upload, Amazon S3 + frees up the parts storage and stops charging you for the parts + storage. + + :type key_name: string + :param key_name: The name of the key that will ultimately + result from this multipart upload operation. This will be + exactly as the key appears in the bucket after the upload + process has been completed. + + :type headers: dict + :param headers: Additional HTTP headers to send and store with the + resulting key in S3. + + :type reduced_redundancy: boolean + :param reduced_redundancy: In multipart uploads, the storage + class is specified when initiating the upload, not when + uploading individual parts. So if you want the resulting + key to use the reduced redundancy storage class set this + flag when you initiate the upload. + + :type metadata: dict + :param metadata: Any metadata that you would like to set on the key + that results from the multipart upload. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key (once completed) in S3. + """ + query_args = 'uploads' + provider = self.connection.provider + headers = headers or {} + if policy: + headers[provider.acl_header] = policy + if reduced_redundancy: + storage_class_header = provider.storage_class_header + if storage_class_header: + headers[storage_class_header] = 'REDUCED_REDUNDANCY' + # TODO: what if the provider doesn't support reduced redundancy? 
+        # (see boto.s3.key.Key.set_contents_from_file)
+        if encrypt_key:
+            headers[provider.server_side_encryption_header] = 'AES256'
+        if metadata is None:
+            metadata = {}
+
+        headers = boto.utils.merge_meta(headers, metadata,
+                                        self.connection.provider)
+        response = self.connection.make_request('POST', self.name, key_name,
+                                                query_args=query_args,
+                                                headers=headers)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 200:
+            resp = MultiPartUpload(self)
+            h = handler.XmlHandler(resp, self)
+            if not isinstance(body, bytes):
+                body = body.encode('utf-8')
+            xml.sax.parseString(body, h)
+            return resp
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def complete_multipart_upload(self, key_name, upload_id,
+                                  xml_body, headers=None):
+        """
+        Complete a multipart upload operation.
+        """
+        query_args = 'uploadId=%s' % upload_id
+        if headers is None:
+            headers = {}
+        headers['Content-Type'] = 'text/xml'
+        response = self.connection.make_request('POST', self.name, key_name,
+                                                query_args=query_args,
+                                                headers=headers, data=xml_body)
+        contains_error = False
+        body = response.read().decode('utf-8')
+        # Some errors will be reported in the body of the response
+        # even though the HTTP response code is 200.  This check
+        # does a quick and dirty peek in the body for an error element.
+        if body.find('<Error>') > 0:
+            contains_error = True
+        boto.log.debug(body)
+        if response.status == 200 and not contains_error:
+            resp = CompleteMultiPartUpload(self)
+            h = handler.XmlHandler(resp, self)
+            if not isinstance(body, bytes):
+                body = body.encode('utf-8')
+            xml.sax.parseString(body, h)
+            # Use a dummy key to parse various response headers
+            # for versioning, encryption info and then explicitly
+            # set the completed MPU object values from key.
+            k = self.key_class(self)
+            k.handle_version_headers(response)
+            k.handle_encryption_headers(response)
+            resp.version_id = k.version_id
+            resp.encrypted = k.encrypted
+            return resp
+        else:
+            raise self.connection.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def cancel_multipart_upload(self, key_name, upload_id, headers=None):
+        """
+        Cancel a multipart upload operation.
+
+        To verify that all parts have been removed, so you don't get charged
+        for the part storage, you should call the List Parts operation and
+        ensure the parts list is empty.
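+
+        Example (a sketch; ``mp`` is the :class:`MultiPartUpload` object
+        returned by ``initiate_multipart_upload``)::
+
+            mp = bucket.initiate_multipart_upload('big/object')
+            bucket.cancel_multipart_upload(mp.key_name, mp.id)
+            assert all(u.id != mp.id
+                       for u in bucket.get_all_multipart_uploads())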
+ """ + query_args = 'uploadId=%s' % upload_id + response = self.connection.make_request('DELETE', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def delete(self, headers=None): + return self.connection.delete_bucket(self.name, headers=headers) + + def get_tags(self): + response = self.get_xml_tags() + tags = Tags() + h = handler.XmlHandler(tags, self) + if not isinstance(response, bytes): + response = response.encode('utf-8') + xml.sax.parseString(response, h) + return tags + + def get_xml_tags(self): + response = self.connection.make_request('GET', self.name, + query_args='tagging', + headers=None) + body = response.read() + if response.status == 200: + return body + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + + def set_xml_tags(self, tag_str, headers=None, query_args='tagging'): + if headers is None: + headers = {} + md5 = boto.utils.compute_md5(StringIO(tag_str)) + headers['Content-MD5'] = md5[1] + headers['Content-Type'] = 'text/xml' + if not isinstance(tag_str, bytes): + tag_str = tag_str.encode('utf-8') + response = self.connection.make_request('PUT', self.name, + data=tag_str, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 204: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) + return True + + def set_tags(self, tags, headers=None): + return self.set_xml_tags(tags.to_xml(), headers=headers) + + def delete_tags(self, headers=None): + response = self.connection.make_request('DELETE', self.name, + query_args='tagging', + headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 204: + return True + else: + raise self.connection.provider.storage_response_error( + response.status, response.reason, body) diff --git a/ext/boto/s3/bucketlistresultset.py b/ext/boto/s3/bucketlistresultset.py new file mode 100644 index 0000000000..d95ab68505 --- /dev/null +++ b/ext/boto/s3/bucketlistresultset.py @@ -0,0 +1,158 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.compat import unquote_str + +def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None, + encoding_type=None): + """ + A generator function for listing keys in a bucket. 
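+
+    This generator backs :class:`BucketListResultSet` below and is normally
+    reached indirectly, e.g. (a sketch, assuming an existing ``bucket``)::
+
+        for key in BucketListResultSet(bucket, prefix='logs/'):
+            print(key.name)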
+ """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_keys(prefix=prefix, marker=marker, + delimiter=delimiter, headers=headers, + encoding_type=encoding_type) + for k in rs: + yield k + if k: + marker = rs.next_marker or k.name + if marker and encoding_type == "url": + marker = unquote_str(marker) + more_results= rs.is_truncated + +class BucketListResultSet(object): + """ + A resultset for listing keys within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', marker='', + headers=None, encoding_type=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.marker = marker + self.headers = headers + self.encoding_type = encoding_type + + def __iter__(self): + return bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, marker=self.marker, + headers=self.headers, + encoding_type=self.encoding_type) + +def versioned_bucket_lister(bucket, prefix='', delimiter='', + key_marker='', version_id_marker='', headers=None, + encoding_type=None): + """ + A generator function for listing versions in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker, + version_id_marker=version_id_marker, + delimiter=delimiter, headers=headers, + max_keys=999, encoding_type=encoding_type) + for k in rs: + yield k + key_marker = rs.next_key_marker + if key_marker and encoding_type == "url": + key_marker = unquote_str(key_marker) + version_id_marker = rs.next_version_id_marker + more_results= rs.is_truncated + +class VersionedBucketListResultSet(object): + """ + A resultset for listing versions within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None, encoding_type=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.key_marker = key_marker + self.version_id_marker = version_id_marker + self.headers = headers + self.encoding_type = encoding_type + + def __iter__(self): + return versioned_bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, + key_marker=self.key_marker, + version_id_marker=self.version_id_marker, + headers=self.headers, + encoding_type=self.encoding_type) + +def multipart_upload_lister(bucket, key_marker='', + upload_id_marker='', + headers=None, encoding_type=None): + """ + A generator function for listing multipart uploads in a bucket. 
+ """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_multipart_uploads(key_marker=key_marker, + upload_id_marker=upload_id_marker, + headers=headers, + encoding_type=encoding_type) + for k in rs: + yield k + key_marker = rs.next_key_marker + if key_marker and encoding_type == "url": + key_marker = unquote_str(key_marker) + upload_id_marker = rs.next_upload_id_marker + more_results= rs.is_truncated + +class MultiPartUploadListResultSet(object): + """ + A resultset for listing multipart uploads within a bucket. + Uses the multipart_upload_lister generator function and + implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of uploads within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + def __init__(self, bucket=None, key_marker='', + upload_id_marker='', headers=None, encoding_type=None): + self.bucket = bucket + self.key_marker = key_marker + self.upload_id_marker = upload_id_marker + self.headers = headers + self.encoding_type = encoding_type + + def __iter__(self): + return multipart_upload_lister(self.bucket, + key_marker=self.key_marker, + upload_id_marker=self.upload_id_marker, + headers=self.headers, + encoding_type=self.encoding_type) diff --git a/ext/boto/s3/bucketlogging.py b/ext/boto/s3/bucketlogging.py new file mode 100644 index 0000000000..38cef1140e --- /dev/null +++ b/ext/boto/s3/bucketlogging.py @@ -0,0 +1,83 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+
+import xml.sax.saxutils
+from boto.s3.acl import Grant
+
+class BucketLogging(object):
+
+    def __init__(self, target=None, prefix=None, grants=None):
+        self.target = target
+        self.prefix = prefix
+        if grants is None:
+            self.grants = []
+        else:
+            self.grants = grants
+
+    def __repr__(self):
+        if self.target is None:
+            return "<BucketLoggingStatus: Disabled>"
+        grants = []
+        for g in self.grants:
+            if g.type == 'CanonicalUser':
+                u = g.display_name
+            elif g.type == 'Group':
+                u = g.uri
+            else:
+                u = g.email_address
+            grants.append("%s = %s" % (u, g.permission))
+        return "<BucketLoggingStatus: %s, %s, (%s)>" % (self.target,
+                                                        self.prefix,
+                                                        ", ".join(grants))
+
+    def add_grant(self, grant):
+        self.grants.append(grant)
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Grant':
+            self.grants.append(Grant())
+            return self.grants[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'TargetBucket':
+            self.target = value
+        elif name == 'TargetPrefix':
+            self.prefix = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        # caller is responsible for encoding to utf-8
+        s = u'<?xml version="1.0" encoding="UTF-8"?>'
+        s += u'<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
+        if self.target is not None:
+            s += u'<LoggingEnabled>'
+            s += u'<TargetBucket>%s</TargetBucket>' % self.target
+            prefix = self.prefix or ''
+            s += u'<TargetPrefix>%s</TargetPrefix>' % xml.sax.saxutils.escape(prefix)
+            if self.grants:
+                s += '<TargetGrants>'
+                for grant in self.grants:
+                    s += grant.to_xml()
+                s += '</TargetGrants>'
+            s += u'</LoggingEnabled>'
+        s += u'</BucketLoggingStatus>'
+        return s
diff --git a/ext/boto/s3/connection.py b/ext/boto/s3/connection.py
new file mode 100644
index 0000000000..fa3fbd7205
--- /dev/null
+++ b/ext/boto/s3/connection.py
@@ -0,0 +1,672 @@
+# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import base64
+from boto.compat import six, urllib
+import time
+
+from boto.auth import detect_potential_s3sigv4
+import boto.utils
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.s3.bucket import Bucket
+from boto.s3.key import Key
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError, S3ResponseError
+
+
+def check_lowercase_bucketname(n):
+    """
+    Bucket names must not contain uppercase characters. We check for
+    this by appending a lowercase character and testing with islower().
+    Note this also covers cases like numeric bucket names with dashes.
+
+    >>> check_lowercase_bucketname("Aaaa")
+    Traceback (most recent call last):
+    ...
+ BotoClientError: S3Error: Bucket names cannot contain upper-case + characters when using either the sub-domain or virtual hosting calling + format. + + >>> check_lowercase_bucketname("1234-5678-9123") + True + >>> check_lowercase_bucketname("abcdefg1234") + True + """ + if not (n + 'a').islower(): + raise BotoClientError("Bucket names cannot contain upper-case " \ + "characters when using either the sub-domain or virtual " \ + "hosting calling format.") + return True + + +def assert_case_insensitive(f): + def wrapper(*args, **kwargs): + if len(args) == 3 and check_lowercase_bucketname(args[2]): + pass + return f(*args, **kwargs) + return wrapper + + +class _CallingFormat(object): + + def get_bucket_server(self, server, bucket): + return '' + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '%s://' % protocol + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + def build_host(self, server, bucket): + if bucket == '': + return server + else: + return self.get_bucket_server(server, bucket) + + def build_auth_path(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path = '' + if bucket != '': + path = '/' + bucket + return path + '/%s' % urllib.parse.quote(key) + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + return '/%s' % urllib.parse.quote(key) + + +class SubdomainCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return '%s.%s' % (bucket, server) + + +class VHostCallingFormat(_CallingFormat): + + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return bucket + + +class OrdinaryCallingFormat(_CallingFormat): + + def get_bucket_server(self, server, bucket): + return server + + def build_path_base(self, bucket, key=''): + key = boto.utils.get_utf8_value(key) + path_base = '/' + if bucket: + path_base += "%s/" % bucket + return path_base + urllib.parse.quote(key) + + +class ProtocolIndependentOrdinaryCallingFormat(OrdinaryCallingFormat): + + def build_url_base(self, connection, protocol, server, bucket, key=''): + url_base = '//' + url_base += self.build_host(server, bucket) + url_base += connection.get_path(self.build_path_base(bucket, key)) + return url_base + + +class Location(object): + + DEFAULT = '' # US Classic Region + EU = 'EU' # Ireland + EUCentral1 = 'eu-central-1' # Frankfurt + USWest = 'us-west-1' + USWest2 = 'us-west-2' + SAEast = 'sa-east-1' + APNortheast = 'ap-northeast-1' + APSoutheast = 'ap-southeast-1' + APSoutheast2 = 'ap-southeast-2' + CNNorth1 = 'cn-north-1' + + +class NoHostProvided(object): + # An identifying object to help determine whether the user provided a + # ``host`` or not. Never instantiated. 
pass
+
+
+class HostRequiredError(BotoClientError):
+    pass
+
+
+class S3Connection(AWSAuthConnection):
+
+    DefaultHost = 's3.amazonaws.com'
+    DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
+    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None,
+                 host=NoHostProvided, debug=0, https_connection_factory=None,
+                 calling_format=DefaultCallingFormat, path='/',
+                 provider='aws', bucket_class=Bucket, security_token=None,
+                 suppress_consec_slashes=True, anon=False,
+                 validate_certs=None, profile_name=None):
+        no_host_provided = False
+        # Try falling back to the boto config file's value, if present.
+        if host is NoHostProvided:
+            host = boto.config.get('s3', 'host')
+            if host is None:
+                host = self.DefaultHost
+                no_host_provided = True
+        if isinstance(calling_format, six.string_types):
+            calling_format = boto.utils.find_class(calling_format)()
+        self.calling_format = calling_format
+        self.bucket_class = bucket_class
+        self.anon = anon
+        super(S3Connection, self).__init__(host,
+                aws_access_key_id, aws_secret_access_key,
+                is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+                debug=debug, https_connection_factory=https_connection_factory,
+                path=path, provider=provider, security_token=security_token,
+                suppress_consec_slashes=suppress_consec_slashes,
+                validate_certs=validate_certs, profile_name=profile_name)
+        # We need to delay until after the call to ``super`` before checking
+        # to see if SigV4 is in use.
+        if no_host_provided:
+            if 'hmac-v4-s3' in self._required_auth_capability():
+                raise HostRequiredError(
+                    "When using SigV4, you must specify a 'host' parameter."
+                )
+
+    @detect_potential_s3sigv4
+    def _required_auth_capability(self):
+        if self.anon:
+            return ['anon']
+        else:
+            return ['s3']
+
+    def __iter__(self):
+        for bucket in self.get_all_buckets():
+            yield bucket
+
+    def __contains__(self, bucket_name):
+        return not (self.lookup(bucket_name) is None)
+
+    def set_bucket_class(self, bucket_class):
+        """
+        Set the Bucket class associated with this connection.  By default,
+        this would be the boto.s3.bucket.Bucket class but if you want to
+        subclass that for some reason this allows you to associate your
+        new class.
+
+        :type bucket_class: class
+        :param bucket_class: A subclass of Bucket that can be more specific
+        """
+        self.bucket_class = bucket_class
+
+    def build_post_policy(self, expiration_time, conditions):
+        """
+        Taken from the AWS book Python examples and modified for use with boto
+        """
+        assert isinstance(expiration_time, time.struct_time), \
+            'Policy document must include a valid expiration Time object'
+
+        # Convert conditions object mappings to condition statements
+
+        return '{"expiration": "%s",\n"conditions": [%s]}' % \
+            (time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions))
+
+    def build_post_form_args(self, bucket_name, key, expires_in=6000,
+                             acl=None, success_action_redirect=None,
+                             max_content_length=None,
+                             http_method='http', fields=None,
+                             conditions=None, storage_class='STANDARD',
+                             server_side_encryption=None):
+        """
+        Taken from the AWS book Python examples and modified for use with
+        boto.  This only returns the arguments required for the post form,
+        not the actual form.
This does not return the file input field which also + needs to be added + + :type bucket_name: string + :param bucket_name: Bucket to submit to + + :type key: string + :param key: Key name, optionally add ${filename} to the end to + attach the submitted filename + + :type expires_in: integer + :param expires_in: Time (in seconds) before this expires, defaults + to 6000 + + :type acl: string + :param acl: A canned ACL. One of: + * private + * public-read + * public-read-write + * authenticated-read + * bucket-owner-read + * bucket-owner-full-control + + :type success_action_redirect: string + :param success_action_redirect: URL to redirect to on success + + :type max_content_length: integer + :param max_content_length: Maximum size for this file + + :type http_method: string + :param http_method: HTTP Method to use, "http" or "https" + + :type storage_class: string + :param storage_class: Storage class to use for storing the object. + Valid values: STANDARD | REDUCED_REDUNDANCY + + :type server_side_encryption: string + :param server_side_encryption: Specifies server-side encryption + algorithm to use when Amazon S3 creates an object. + Valid values: None | AES256 + + :rtype: dict + :return: A dictionary containing field names/values as well as + a url to POST to + + .. code-block:: python + + + """ + if fields is None: + fields = [] + if conditions is None: + conditions = [] + expiration = time.gmtime(int(time.time() + expires_in)) + + # Generate policy document + conditions.append('{"bucket": "%s"}' % bucket_name) + if key.endswith("${filename}"): + conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")]) + else: + conditions.append('{"key": "%s"}' % key) + if acl: + conditions.append('{"acl": "%s"}' % acl) + fields.append({"name": "acl", "value": acl}) + if success_action_redirect: + conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect) + fields.append({"name": "success_action_redirect", "value": success_action_redirect}) + if max_content_length: + conditions.append('["content-length-range", 0, %i]' % max_content_length) + + if self.provider.security_token: + fields.append({'name': 'x-amz-security-token', + 'value': self.provider.security_token}) + conditions.append('{"x-amz-security-token": "%s"}' % self.provider.security_token) + + if storage_class: + fields.append({'name': 'x-amz-storage-class', + 'value': storage_class}) + conditions.append('{"x-amz-storage-class": "%s"}' % storage_class) + + if server_side_encryption: + fields.append({'name': 'x-amz-server-side-encryption', + 'value': server_side_encryption}) + conditions.append('{"x-amz-server-side-encryption": "%s"}' % server_side_encryption) + + policy = self.build_post_policy(expiration, conditions) + + # Add the base64-encoded policy document as the 'policy' field + policy_b64 = base64.b64encode(policy) + fields.append({"name": "policy", "value": policy_b64}) + + # Add the AWS access key as the 'AWSAccessKeyId' field + fields.append({"name": "AWSAccessKeyId", + "value": self.aws_access_key_id}) + + # Add signature for encoded policy document as the + # 'signature' field + signature = self._auth_handler.sign_string(policy_b64) + fields.append({"name": "signature", "value": signature}) + fields.append({"name": "key", "value": key}) + + # HTTPS protocol will be used if the secure HTTP option is enabled. 
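+        # The returned dict can be rendered into an HTML POST form, roughly
+        # (a sketch; one hidden <input> per entry in 'fields'):
+        #   <form action="{action}" method="post" enctype="multipart/form-data">
+        #     <input type="hidden" name="{name}" value="{value}"/> ...
+        #     <input name="file" type="file"/><input type="submit"/>
+        #   </form>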
+ url = '%s://%s/' % (http_method, + self.calling_format.build_host(self.server_name(), + bucket_name)) + + return {"action": url, "fields": fields} + + def generate_url_sigv4(self, expires_in, method, bucket='', key='', + headers=None, force_http=False, + response_headers=None, version_id=None, + iso_date=None): + path = self.calling_format.build_path_base(bucket, key) + auth_path = self.calling_format.build_auth_path(bucket, key) + host = self.calling_format.build_host(self.server_name(), bucket) + + # For presigned URLs we should ignore the port if it's HTTPS + if host.endswith(':443'): + host = host[:-4] + + params = {} + if version_id is not None: + params['VersionId'] = version_id + + if response_headers is not None: + params.update(response_headers) + + http_request = self.build_base_http_request(method, path, auth_path, + headers=headers, host=host, + params=params) + + return self._auth_handler.presign(http_request, expires_in, + iso_date=iso_date) + + def generate_url(self, expires_in, method, bucket='', key='', headers=None, + query_auth=True, force_http=False, response_headers=None, + expires_in_absolute=False, version_id=None): + if self._auth_handler.capability[0] == 'hmac-v4-s3' and query_auth: + # Handle the special sigv4 case + return self.generate_url_sigv4(expires_in, method, bucket=bucket, + key=key, headers=headers, force_http=force_http, + response_headers=response_headers, version_id=version_id) + + headers = headers or {} + if expires_in_absolute: + expires = int(expires_in) + else: + expires = int(time.time() + expires_in) + auth_path = self.calling_format.build_auth_path(bucket, key) + auth_path = self.get_path(auth_path) + # optional version_id and response_headers need to be added to + # the query param list. + extra_qp = [] + if version_id is not None: + extra_qp.append("versionId=%s" % version_id) + if response_headers: + for k, v in response_headers.items(): + extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) + if self.provider.security_token: + headers['x-amz-security-token'] = self.provider.security_token + if extra_qp: + delimiter = '?' if '?' not in auth_path else '&' + auth_path += delimiter + '&'.join(extra_qp) + self.calling_format.build_path_base(bucket, key) + if query_auth and not self.anon: + c_string = boto.utils.canonical_string(method, auth_path, headers, + expires, self.provider) + b64_hmac = self._auth_handler.sign_string(c_string) + encoded_canonical = urllib.parse.quote(b64_hmac, safe='') + query_part = '?' + self.QueryString % (encoded_canonical, expires, + self.aws_access_key_id) + else: + query_part = '' + if headers: + hdr_prefix = self.provider.header_prefix + for k, v in headers.items(): + if k.startswith(hdr_prefix): + # headers used for sig generation must be + # included in the url also. + extra_qp.append("%s=%s" % (k, urllib.parse.quote(v))) + if extra_qp: + delimiter = '?' 
if not query_part else '&'
+            query_part += delimiter + '&'.join(extra_qp)
+        if force_http:
+            protocol = 'http'
+            port = 80
+        else:
+            protocol = self.protocol
+            port = self.port
+        return self.calling_format.build_url_base(self, protocol,
+                                                  self.server_name(port),
+                                                  bucket, key) + query_part
+
+    def get_all_buckets(self, headers=None):
+        response = self.make_request('GET', headers=headers)
+        body = response.read()
+        if response.status > 300:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+        rs = ResultSet([('Bucket', self.bucket_class)])
+        h = handler.XmlHandler(rs, self)
+        if not isinstance(body, bytes):
+            body = body.encode('utf-8')
+        xml.sax.parseString(body, h)
+        return rs
+
+    def get_canonical_user_id(self, headers=None):
+        """
+        Convenience method that returns the "CanonicalUserID" of the
+        user whose credentials are associated with the connection.
+        The only way to get this value is to do a GET request on the
+        service which returns all buckets associated with the account.
+        As part of that response, the canonical userid is returned.
+        This method simply does all of that and then returns just the
+        user id.
+
+        :rtype: string
+        :return: A string containing the canonical user id.
+        """
+        rs = self.get_all_buckets(headers=headers)
+        return rs.owner.id
+
+    def get_bucket(self, bucket_name, validate=True, headers=None):
+        """
+        Retrieves a bucket by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be raised.
+        If you are unsure if the bucket exists or not, you can use the
+        ``S3Connection.lookup`` method, which will either return a valid
+        bucket or ``None``.
+
+        If ``validate=False`` is passed, no request is made to the service
+        (no charge/communication delay). This is only safe to do if you are
+        **sure** the bucket exists.
+
+        If the default ``validate=True`` is passed, a request is made to the
+        service to ensure the bucket exists. Prior to Boto v2.25.0, this
+        fetched a list of keys (but with a max limit set to ``0``, always
+        returning an empty list) in the bucket (& included better error
+        messages), at an increased expense. As of Boto v2.25.0, this now
+        performs a HEAD request (less expensive but worse error messages).
+
+        If you were relying on parsing the error message before, you should
+        call something like::
+
+            bucket = conn.get_bucket('<bucket_name>', validate=False)
+            bucket.get_all_keys(maxkeys=0)
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to verify the bucket exists
+            on the service-side. (Default: ``True``)
+        """
+        if validate:
+            return self.head_bucket(bucket_name, headers=headers)
+        else:
+            return self.bucket_class(self, bucket_name)
+
+    def head_bucket(self, bucket_name, headers=None):
+        """
+        Determines if a bucket exists by name.
+
+        If the bucket does not exist, an ``S3ResponseError`` will be raised.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :returns: A <Bucket> object
+        """
+        response = self.make_request('HEAD', bucket_name, headers=headers)
+        body = response.read()
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        elif response.status == 403:
+            # For backward-compatibility, we'll populate part of the exception
+            # with the most-common default.
err = self.provider.storage_response_error(
+                response.status,
+                response.reason,
+                body
+            )
+            err.error_code = 'AccessDenied'
+            err.error_message = 'Access Denied'
+            raise err
+        elif response.status == 404:
+            # For backward-compatibility, we'll populate part of the exception
+            # with the most-common default.
+            err = self.provider.storage_response_error(
+                response.status,
+                response.reason,
+                body
+            )
+            err.error_code = 'NoSuchBucket'
+            err.error_message = 'The specified bucket does not exist'
+            raise err
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def lookup(self, bucket_name, validate=True, headers=None):
+        """
+        Attempts to get a bucket from S3.
+
+        Works identically to ``S3Connection.get_bucket``, save for that it
+        will return ``None`` if the bucket does not exist instead of throwing
+        an exception.
+
+        :type bucket_name: string
+        :param bucket_name: The name of the bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :type validate: boolean
+        :param validate: If ``True``, it will try to fetch all keys within the
+            given bucket. (Default: ``True``)
+        """
+        try:
+            bucket = self.get_bucket(bucket_name, validate, headers=headers)
+        except:
+            bucket = None
+        return bucket
+
+    def create_bucket(self, bucket_name, headers=None,
+                      location=Location.DEFAULT, policy=None):
+        """
+        Creates a new bucket.  By default it is created in the US Classic
+        region; you can pass Location.EU to create a European bucket (S3)
+        or European Union bucket (GCS).
+
+        :type bucket_name: string
+        :param bucket_name: The name of the new bucket
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
+
+        :type location: str
+        :param location: The location of the new bucket. You can use one of
+            the constants in :class:`boto.s3.connection.Location` (e.g.
+            Location.EU, Location.USWest, etc.).
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new bucket in S3.
+
+        """
+        check_lowercase_bucketname(bucket_name)
+
+        if policy:
+            if headers:
+                headers[self.provider.acl_header] = policy
+            else:
+                headers = {self.provider.acl_header: policy}
+        if location == Location.DEFAULT:
+            data = ''
+        else:
+            data = '<CreateBucketConfiguration><LocationConstraint>' + \
+                location + '</LocationConstraint></CreateBucketConfiguration>'
+        response = self.make_request('PUT', bucket_name, headers=headers,
+                                     data=data)
+        body = response.read()
+        if response.status == 409:
+            raise self.provider.storage_create_error(
+                response.status, response.reason, body)
+        if response.status == 200:
+            return self.bucket_class(self, bucket_name)
+        else:
+            raise self.provider.storage_response_error(
+                response.status, response.reason, body)
+
+    def delete_bucket(self, bucket, headers=None):
+        """
+        Removes an S3 bucket.
+
+        In order to remove the bucket, it must first be empty. If the bucket
+        is not empty, an ``S3ResponseError`` will be raised.
+
+        :type bucket: string or :class:`boto.s3.bucket.Bucket`
+        :param bucket: The name of the bucket (or the bucket object) to
+            delete.
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the request to
+            AWS.
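+
+        Example (a sketch; empties the bucket first, since only an empty
+        bucket can be deleted)::
+
+            bucket = conn.get_bucket('mybucket')
+            for key in bucket.list():
+                key.delete()
+            conn.delete_bucket('mybucket')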
+ """ + response = self.make_request('DELETE', bucket, headers=headers) + body = response.read() + if response.status != 204: + raise self.provider.storage_response_error( + response.status, response.reason, body) + + def make_request(self, method, bucket='', key='', headers=None, data='', + query_args=None, sender=None, override_num_retries=None, + retry_handler=None): + if isinstance(bucket, self.bucket_class): + bucket = bucket.name + if isinstance(key, Key): + key = key.name + path = self.calling_format.build_path_base(bucket, key) + boto.log.debug('path=%s' % path) + auth_path = self.calling_format.build_auth_path(bucket, key) + boto.log.debug('auth_path=%s' % auth_path) + host = self.calling_format.build_host(self.server_name(), bucket) + if query_args: + path += '?' + query_args + boto.log.debug('path=%s' % path) + auth_path += '?' + query_args + boto.log.debug('auth_path=%s' % auth_path) + return super(S3Connection, self).make_request( + method, path, headers, + data, host, auth_path, sender, + override_num_retries=override_num_retries, + retry_handler=retry_handler + ) diff --git a/ext/boto/s3/cors.py b/ext/boto/s3/cors.py new file mode 100644 index 0000000000..d97ee890aa --- /dev/null +++ b/ext/boto/s3/cors.py @@ -0,0 +1,210 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + +class CORSRule(object): + """ + CORS rule for a bucket. + + :ivar id: A unique identifier for the rule. The ID value can be + up to 255 characters long. The IDs help you find a rule in + the configuration. + + :ivar allowed_methods: An HTTP method that you want to allow the + origin to execute. Each CORSRule must identify at least one + origin and one method. Valid values are: + GET|PUT|HEAD|POST|DELETE + + :ivar allowed_origin: An origin that you want to allow cross-domain + requests from. This can contain at most one * wild character. + Each CORSRule must identify at least one origin and one method. + The origin value can include at most one '*' wild character. + For example, "http://*.example.com". You can also specify + only * as the origin value allowing all origins cross-domain access. + + :ivar allowed_header: Specifies which headers are allowed in a + pre-flight OPTIONS request via the + Access-Control-Request-Headers header. Each header name + specified in the Access-Control-Request-Headers header must + have a corresponding entry in the rule. 
+        Amazon S3 will send
+        only the allowed headers in a response that were requested.
+        This can contain at most one * wild character.
+
+    :ivar max_age_seconds: The time in seconds that your browser is to
+        cache the preflight response for the specified resource.
+
+    :ivar expose_header: One or more headers in the response that you
+        want customers to be able to access from their applications
+        (for example, from a JavaScript XMLHttpRequest object). You
+        add one ExposeHeader element in the rule for each header.
+    """
+
+    def __init__(self, allowed_method=None, allowed_origin=None,
+                 id=None, allowed_header=None, max_age_seconds=None,
+                 expose_header=None):
+        if allowed_method is None:
+            allowed_method = []
+        self.allowed_method = allowed_method
+        if allowed_origin is None:
+            allowed_origin = []
+        self.allowed_origin = allowed_origin
+        self.id = id
+        if allowed_header is None:
+            allowed_header = []
+        self.allowed_header = allowed_header
+        self.max_age_seconds = max_age_seconds
+        if expose_header is None:
+            expose_header = []
+        self.expose_header = expose_header
+
+    def __repr__(self):
+        return '<Rule: %s>' % self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'AllowedMethod':
+            self.allowed_method.append(value)
+        elif name == 'AllowedOrigin':
+            self.allowed_origin.append(value)
+        elif name == 'AllowedHeader':
+            self.allowed_header.append(value)
+        elif name == 'MaxAgeSeconds':
+            self.max_age_seconds = int(value)
+        elif name == 'ExposeHeader':
+            self.expose_header.append(value)
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<CORSRule>'
+        for allowed_method in self.allowed_method:
+            s += '<AllowedMethod>%s</AllowedMethod>' % allowed_method
+        for allowed_origin in self.allowed_origin:
+            s += '<AllowedOrigin>%s</AllowedOrigin>' % allowed_origin
+        for allowed_header in self.allowed_header:
+            s += '<AllowedHeader>%s</AllowedHeader>' % allowed_header
+        for expose_header in self.expose_header:
+            s += '<ExposeHeader>%s</ExposeHeader>' % expose_header
+        if self.max_age_seconds:
+            s += '<MaxAgeSeconds>%d</MaxAgeSeconds>' % self.max_age_seconds
+        if self.id:
+            s += '<ID>%s</ID>' % self.id
+        s += '</CORSRule>'
+        return s
+
+
+class CORSConfiguration(list):
+    """
+    A container for the rules associated with a CORS configuration.
+    """
+
+    def startElement(self, name, attrs, connection):
+        if name == 'CORSRule':
+            rule = CORSRule()
+            self.append(rule)
+            return rule
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        """
+        Returns a string containing the XML version of the CORS
+        configuration as defined by S3.
+        """
+        s = '<CORSConfiguration>'
+        for rule in self:
+            s += rule.to_xml()
+        s += '</CORSConfiguration>'
+        return s
+
+    def add_rule(self, allowed_method, allowed_origin,
+                 id=None, allowed_header=None, max_age_seconds=None,
+                 expose_header=None):
+        """
+        Add a rule to this CORS configuration. This only adds
+        the rule to the local copy. To install the new rule(s) on
+        the bucket, you need to pass this CORS config object
+        to the set_cors method of the Bucket object.
+
+        :type allowed_method: list of str
+        :param allowed_method: An HTTP method that you want to allow the
+            origin to execute. Each CORSRule must identify at least one
+            origin and one method. Valid values are:
+            GET|PUT|HEAD|POST|DELETE
+
+        :type allowed_origin: list of str
+        :param allowed_origin: An origin that you want to allow cross-domain
+            requests from. This can contain at most one * wild character.
+            Each CORSRule must identify at least one origin and one method.
+            The origin value can include at most one '*' wild character.
+            For example, "http://*.example.com". You can also specify
+            only * as the origin value allowing all origins
+            cross-domain access.
+
+        :type id: str
+        :param id: A unique identifier for the rule. The ID value can be
+            up to 255 characters long. The IDs help you find a rule in
+            the configuration.
+
+        :type allowed_header: list of str
+        :param allowed_header: Specifies which headers are allowed in a
+            pre-flight OPTIONS request via the
+            Access-Control-Request-Headers header. Each header name
+            specified in the Access-Control-Request-Headers header must
+            have a corresponding entry in the rule. Amazon S3 will send
+            only the allowed headers in a response that were requested.
+            This can contain at most one * wild character.
+
+        :type max_age_seconds: int
+        :param max_age_seconds: The time in seconds that your browser is to
+            cache the preflight response for the specified resource.
+
+        :type expose_header: list of str
+        :param expose_header: One or more headers in the response that you
+            want customers to be able to access from their applications
+            (for example, from a JavaScript XMLHttpRequest object). You
+            add one ExposeHeader element in the rule for each header.
+        """
+        if not isinstance(allowed_method, (list, tuple)):
+            allowed_method = [allowed_method]
+        if not isinstance(allowed_origin, (list, tuple)):
+            allowed_origin = [allowed_origin]
+        if not isinstance(allowed_header, (list, tuple)):
+            if allowed_header is None:
+                allowed_header = []
+            else:
+                allowed_header = [allowed_header]
+        if not isinstance(expose_header, (list, tuple)):
+            if expose_header is None:
+                expose_header = []
+            else:
+                expose_header = [expose_header]
+        rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
+                        max_age_seconds, expose_header)
+        self.append(rule)
diff --git a/ext/boto/s3/deletemarker.py b/ext/boto/s3/deletemarker.py
new file mode 100644
index 0000000000..d8e7cc8b58
--- /dev/null
+++ b/ext/boto/s3/deletemarker.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
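As a usage sketch for the two CORS classes added above (bucket.set_cors is boto's Bucket-side helper, assumed here since bucket.py is outside this hunk):

    from boto.s3.cors import CORSConfiguration

    cors = CORSConfiguration()
    # Allow cross-origin GETs from anywhere; cache preflight responses an hour.
    cors.add_rule('GET', '*', allowed_header='*', max_age_seconds=3600)
    print(cors.to_xml())  # <CORSConfiguration><CORSRule>...</CORSRule></CORSConfiguration>
    # bucket.set_cors(cors)  # assumed Bucket helper that PUTs this XML to S3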
+ +from boto.s3.user import User + +class DeleteMarker(object): + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.version_id = None + self.is_latest = False + self.last_modified = None + self.owner = None + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) diff --git a/ext/boto/s3/key.py b/ext/boto/s3/key.py new file mode 100644 index 0000000000..67b41378ab --- /dev/null +++ b/ext/boto/s3/key.py @@ -0,0 +1,1933 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Nexenta Systems Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import email.utils +import errno +import hashlib +import mimetypes +import os +import re +import base64 +import binascii +import math +from hashlib import md5 +import boto.utils +from boto.compat import BytesIO, six, urllib, encodebytes + +from boto.exception import BotoClientError +from boto.exception import StorageDataError +from boto.exception import PleaseRetryException +from boto.provider import Provider +from boto.s3.keyfile import KeyFile +from boto.s3.user import User +from boto import UserAgent +from boto.utils import compute_md5, compute_hash +from boto.utils import find_matching_headers +from boto.utils import merge_headers_by_name + + +class Key(object): + """ + Represents a key (object) in an S3 bucket. + + :ivar bucket: The parent :class:`boto.s3.bucket.Bucket`. + :ivar name: The name of this Key object. + :ivar metadata: A dictionary containing user metadata that you + wish to store with the object or that has been retrieved from + an existing object. + :ivar cache_control: The value of the `Cache-Control` HTTP header. + :ivar content_type: The value of the `Content-Type` HTTP header. + :ivar content_encoding: The value of the `Content-Encoding` HTTP header. + :ivar content_disposition: The value of the `Content-Disposition` HTTP + header. + :ivar content_language: The value of the `Content-Language` HTTP header. 
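DeleteMarker objects like the one defined above are hydrated from versioned-bucket listings; a sketch, assuming boto's Bucket.list_versions helper (also outside this hunk):

    from boto.s3.deletemarker import DeleteMarker

    # `bucket` is a versioning-enabled boto Bucket (illustrative).
    for version in bucket.list_versions():
        if isinstance(version, DeleteMarker):
            print('delete marker %s (version %s, latest=%s)'
                  % (version.name, version.version_id, version.is_latest))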
+    :ivar etag: The `etag` associated with this object.
+    :ivar last_modified: The string timestamp representing the last
+        time this object was modified in S3.
+    :ivar owner: The ID of the owner of this object.
+    :ivar storage_class: The storage class of the object. Currently, one of:
+        STANDARD | REDUCED_REDUNDANCY | GLACIER
+    :ivar md5: The MD5 hash of the contents of the object.
+    :ivar size: The size, in bytes, of the object.
+    :ivar version_id: The version ID of this object, if it is a versioned
+        object.
+    :ivar encrypted: Whether the object is encrypted while at rest on
+        the server.
+    """
+
+    DefaultContentType = 'application/octet-stream'
+
+    RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
+    <RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-3-01">
+        <Days>%s</Days>
+    </RestoreRequest>"""
+
+    BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
+
+    # The object metadata fields a user can set, other than custom metadata
+    # fields (i.e., those beginning with a provider-specific prefix like
+    # x-amz-meta).
+    base_user_settable_fields = set(["cache-control", "content-disposition",
+                                     "content-encoding", "content-language",
+                                     "content-md5", "content-type",
+                                     "x-robots-tag", "expires"])
+    _underscore_base_user_settable_fields = set()
+    for f in base_user_settable_fields:
+        _underscore_base_user_settable_fields.add(f.replace('-', '_'))
+    # Metadata fields, whether user-settable or not, other than custom
+    # metadata fields (i.e., those beginning with a provider specific prefix
+    # like x-amz-meta).
+    base_fields = (base_user_settable_fields |
+                   set(["last-modified", "content-length", "date", "etag"]))
+
+    def __init__(self, bucket=None, name=None):
+        self.bucket = bucket
+        self.name = name
+        self.metadata = {}
+        self.cache_control = None
+        self.content_type = self.DefaultContentType
+        self.content_encoding = None
+        self.content_disposition = None
+        self.content_language = None
+        self.filename = None
+        self.etag = None
+        self.is_latest = False
+        self.last_modified = None
+        self.owner = None
+        self._storage_class = None
+        self.path = None
+        self.resp = None
+        self.mode = None
+        self.size = None
+        self.version_id = None
+        self.source_version_id = None
+        self.delete_marker = False
+        self.encrypted = None
+        # If the object is being restored, this attribute will be set to True.
+        # If the object is restored, it will be set to False. Otherwise this
+        # value will be None. If the restore is completed (ongoing_restore =
+        # False), the expiry_date will be populated with the expiry date of the
+        # restored object.
+        self.ongoing_restore = None
+        self.expiry_date = None
+        self.local_hashes = {}
+
+    def __repr__(self):
+        if self.bucket:
+            name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
+        else:
+            name = u'<Key: None,%s>' % self.name
+
+        # Encode to bytes for Python 2 to prevent display decoding issues
+        if not isinstance(name, str):
+            name = name.encode('utf-8')
+
+        return name
+
+    def __iter__(self):
+        return self
+
+    @property
+    def provider(self):
+        provider = None
+        if self.bucket and self.bucket.connection:
+            provider = self.bucket.connection.provider
+        return provider
+
+    def _get_key(self):
+        return self.name
+
+    def _set_key(self, value):
+        self.name = value
+
+    key = property(_get_key, _set_key)
+
+    def _get_md5(self):
+        if 'md5' in self.local_hashes and self.local_hashes['md5']:
+            return binascii.b2a_hex(self.local_hashes['md5'])
+
+    def _set_md5(self, value):
+        if value:
+            self.local_hashes['md5'] = binascii.a2b_hex(value)
+        elif 'md5' in self.local_hashes:
+            self.local_hashes.pop('md5', None)
+
+    md5 = property(_get_md5, _set_md5)
+
+    def _get_base64md5(self):
+        if 'md5' in self.local_hashes and self.local_hashes['md5']:
+            md5 = self.local_hashes['md5']
+            if not isinstance(md5, bytes):
+                md5 = md5.encode('utf-8')
+            return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
+
+    def _set_base64md5(self, value):
+        if value:
+            if not isinstance(value, six.string_types):
+                value = value.decode('utf-8')
+            self.local_hashes['md5'] = binascii.a2b_base64(value)
+        elif 'md5' in self.local_hashes:
+            del self.local_hashes['md5']
+
+    base64md5 = property(_get_base64md5, _set_base64md5)
+
+    def _get_storage_class(self):
+        if self._storage_class is None and self.bucket:
+            # Attempt to fetch storage class
+            list_items = list(self.bucket.list(self.name.encode('utf-8')))
+            if len(list_items) and getattr(list_items[0], '_storage_class',
+                                           None):
+                self._storage_class = list_items[0]._storage_class
+            else:
+                # Key is not yet saved? Just use default...
+                self._storage_class = 'STANDARD'
+
+        return self._storage_class
+
+    def _set_storage_class(self, value):
+        self._storage_class = value
+
+    storage_class = property(_get_storage_class, _set_storage_class)
+
+    def get_md5_from_hexdigest(self, md5_hexdigest):
+        """
+        A utility function to create the 2-tuple (md5hexdigest, base64md5)
+        from just having a precalculated md5_hexdigest.
+        """
+        digest = binascii.unhexlify(md5_hexdigest)
+        base64md5 = encodebytes(digest)
+        if base64md5[-1] == '\n':
+            base64md5 = base64md5[0:-1]
+        return (md5_hexdigest, base64md5)
+
+    def handle_encryption_headers(self, resp):
+        provider = self.bucket.connection.provider
+        if provider.server_side_encryption_header:
+            self.encrypted = resp.getheader(
+                provider.server_side_encryption_header, None)
+        else:
+            self.encrypted = None
+
+    def handle_storage_class_header(self, resp):
+        provider = self.bucket.connection.provider
+        if provider.storage_class_header:
+            self._storage_class = resp.getheader(
+                provider.storage_class_header, None)
+            if (self._storage_class is None and
+                    provider.get_provider_name() == 'aws'):
+                # S3 docs for HEAD object requests say S3 will return this
+                # header for all objects except Standard storage class objects.
+                self._storage_class = 'STANDARD'
+
+    def handle_version_headers(self, resp, force=False):
+        provider = self.bucket.connection.provider
+        # If the Key object already has a version_id attribute value, it
+        # means that it represents an explicit version and the user is
+        # doing a get_contents_*(version_id=) to retrieve another
+        # version of the Key.
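The md5/base64md5 properties above are two views of the same local_hashes['md5'] bytes; a standalone sketch (no network involved):

    import hashlib
    from boto.s3.key import Key

    k = Key()
    k.md5 = hashlib.md5(b'hello world').hexdigest()
    print(k.md5)        # hex digest (bytes under Python 3)
    print(k.base64md5)  # base64 of the same digest, trailing newline stripped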
In that case, we don't really want to + # overwrite the version_id in this Key object. Comprende? + if self.version_id is None or force: + self.version_id = resp.getheader(provider.version_id, None) + self.source_version_id = resp.getheader(provider.copy_source_version_id, + None) + if resp.getheader(provider.delete_marker, 'false') == 'true': + self.delete_marker = True + else: + self.delete_marker = False + + def handle_restore_headers(self, response): + provider = self.bucket.connection.provider + header = response.getheader(provider.restore_header) + if header is None: + return + parts = header.split(',', 1) + for part in parts: + key, val = [i.strip() for i in part.split('=')] + val = val.replace('"', '') + if key == 'ongoing-request': + self.ongoing_restore = True if val.lower() == 'true' else False + elif key == 'expiry-date': + self.expiry_date = val + + def handle_addl_headers(self, headers): + """ + Used by Key subclasses to do additional, provider-specific + processing of response headers. No-op for this base class. + """ + pass + + def open_read(self, headers=None, query_args='', + override_num_retries=None, response_headers=None): + """ + Open this key for reading + + :type headers: dict + :param headers: Headers to pass in the web request + + :type query_args: string + :param query_args: Arguments to pass in the query string + (ie, 'torrent') + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying GET. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + """ + if self.resp is None: + self.mode = 'r' + + provider = self.bucket.connection.provider + self.resp = self.bucket.connection.make_request( + 'GET', self.bucket.name, self.name, headers, + query_args=query_args, + override_num_retries=override_num_retries) + if self.resp.status < 199 or self.resp.status > 299: + body = self.resp.read() + raise provider.storage_response_error(self.resp.status, + self.resp.reason, body) + response_headers = self.resp.msg + self.metadata = boto.utils.get_aws_metadata(response_headers, + provider) + for name, value in response_headers.items(): + # To get correct size for Range GETs, use Content-Range + # header if one was returned. If not, use Content-Length + # header. + if (name.lower() == 'content-length' and + 'Content-Range' not in response_headers): + self.size = int(value) + elif name.lower() == 'content-range': + end_range = re.sub('.*/(.*)', '\\1', value) + self.size = int(end_range) + elif name.lower() in Key.base_fields: + self.__dict__[name.lower().replace('-', '_')] = value + self.handle_version_headers(self.resp) + self.handle_encryption_headers(self.resp) + self.handle_restore_headers(self.resp) + self.handle_addl_headers(self.resp.getheaders()) + + def open_write(self, headers=None, override_num_retries=None): + """ + Open this key for writing. + Not yet implemented + + :type headers: dict + :param headers: Headers to pass in the write request + + :type override_num_retries: int + :param override_num_retries: If not None will override configured + num_retries parameter for underlying PUT. 
+ """ + raise BotoClientError('Not Implemented') + + def open(self, mode='r', headers=None, query_args=None, + override_num_retries=None): + if mode == 'r': + self.mode = 'r' + self.open_read(headers=headers, query_args=query_args, + override_num_retries=override_num_retries) + elif mode == 'w': + self.mode = 'w' + self.open_write(headers=headers, + override_num_retries=override_num_retries) + else: + raise BotoClientError('Invalid mode: %s' % mode) + + closed = False + + def close(self, fast=False): + """ + Close this key. + + :type fast: bool + :param fast: True if you want the connection to be closed without first + reading the content. This should only be used in cases where subsequent + calls don't need to return the content from the open HTTP connection. + Note: As explained at + http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse, + callers must read the whole response before sending a new request to the + server. Calling Key.close(fast=True) and making a subsequent request to + the server will work because boto will get an httplib exception and + close/reopen the connection. + + """ + if self.resp and not fast: + self.resp.read() + self.resp = None + self.mode = None + self.closed = True + + def next(self): + """ + By providing a next method, the key object supports use as an iterator. + For example, you can now say: + + for bytes in key: + write bytes to a file or whatever + + All of the HTTP connection stuff is handled for you. + """ + self.open_read() + data = self.resp.read(self.BufferSize) + if not data: + self.close() + raise StopIteration + return data + + # Python 3 iterator support + __next__ = next + + def read(self, size=0): + self.open_read() + if size == 0: + data = self.resp.read() + else: + data = self.resp.read(size) + if not data: + self.close() + return data + + def change_storage_class(self, new_storage_class, dst_bucket=None, + validate_dst_bucket=True): + """ + Change the storage class of an existing key. + Depending on whether a different destination bucket is supplied + or not, this will either move the item within the bucket, preserving + all metadata and ACL info bucket changing the storage class or it + will copy the item to the provided destination bucket, also + preserving metadata and ACL info. + + :type new_storage_class: string + :param new_storage_class: The new storage class for the Key. + Possible values are: + * STANDARD + * REDUCED_REDUNDANCY + + :type dst_bucket: string + :param dst_bucket: The name of a destination bucket. If not + provided the current bucket of the key will be used. + + :type validate_dst_bucket: bool + :param validate_dst_bucket: If True, will validate the dst_bucket + by using an extra list request. + """ + bucket_name = dst_bucket or self.bucket.name + if new_storage_class == 'STANDARD': + return self.copy(bucket_name, self.name, + reduced_redundancy=False, preserve_acl=True, + validate_dst_bucket=validate_dst_bucket) + elif new_storage_class == 'REDUCED_REDUNDANCY': + return self.copy(bucket_name, self.name, + reduced_redundancy=True, preserve_acl=True, + validate_dst_bucket=validate_dst_bucket) + else: + raise BotoClientError('Invalid storage class: %s' % + new_storage_class) + + def copy(self, dst_bucket, dst_key, metadata=None, + reduced_redundancy=False, preserve_acl=False, + encrypt_key=False, validate_dst_bucket=True): + """ + Copy this Key to another bucket. 
+ + :type dst_bucket: string + :param dst_bucket: The name of the destination bucket + + :type dst_key: string + :param dst_key: The name of the destination key + + :type metadata: dict + :param metadata: Metadata to be associated with new key. If + metadata is supplied, it will replace the metadata of the + source key being copied. If no metadata is supplied, the + source key's metadata will be copied to the new key. + + :type reduced_redundancy: bool + :param reduced_redundancy: If True, this will force the + storage class of the new Key to be REDUCED_REDUNDANCY + regardless of the storage class of the key being copied. + The Reduced Redundancy Storage (RRS) feature of S3, + provides lower redundancy at lower storage cost. + + :type preserve_acl: bool + :param preserve_acl: If True, the ACL from the source key will + be copied to the destination key. If False, the + destination key will have the default ACL. Note that + preserving the ACL in the new key object will require two + additional API calls to S3, one to retrieve the current + ACL and one to set that ACL on the new object. If you + don't care about the ACL, a value of False will be + significantly more efficient. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :type validate_dst_bucket: bool + :param validate_dst_bucket: If True, will validate the dst_bucket + by using an extra list request. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + dst_bucket = self.bucket.connection.lookup(dst_bucket, + validate_dst_bucket) + if reduced_redundancy: + storage_class = 'REDUCED_REDUNDANCY' + else: + storage_class = self.storage_class + return dst_bucket.copy_key(dst_key, self.bucket.name, + self.name, metadata, + storage_class=storage_class, + preserve_acl=preserve_acl, + encrypt_key=encrypt_key, + src_version_id=self.version_id) + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value + elif name == 'ETag': + self.etag = value + elif name == 'IsLatest': + if value == 'true': + self.is_latest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Size': + self.size = int(value) + elif name == 'StorageClass': + self.storage_class = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) + + def exists(self, headers=None): + """ + Returns True if the key exists + + :rtype: bool + :return: Whether the key exists on S3 + """ + return bool(self.bucket.lookup(self.name, headers=headers)) + + def delete(self, headers=None): + """ + Delete this key from S3 + """ + return self.bucket.delete_key(self.name, version_id=self.version_id, + headers=headers) + + def get_metadata(self, name): + return self.metadata.get(name) + + def set_metadata(self, name, value): + # Ensure that metadata that is vital to signing is in the correct + # case. Applies to ``Content-Type`` & ``Content-MD5``. 
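A sketch of the copy method defined above; copying a key onto itself with reduced_redundancy=True is exactly what change_storage_class uses to flip storage classes in place:

    # `key` is an existing boto Key (see earlier sketches).
    copied = key.copy(key.bucket.name, key.name,
                      reduced_redundancy=True, preserve_acl=True)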
+ if name.lower() == 'content-type': + self.metadata['Content-Type'] = value + elif name.lower() == 'content-md5': + self.metadata['Content-MD5'] = value + else: + self.metadata[name] = value + if name.lower() in Key.base_user_settable_fields: + self.__dict__[name.lower().replace('-', '_')] = value + + def update_metadata(self, d): + self.metadata.update(d) + + # convenience methods for setting/getting ACL + def set_acl(self, acl_str, headers=None): + if self.bucket is not None: + self.bucket.set_acl(acl_str, self.name, headers=headers) + + def get_acl(self, headers=None): + if self.bucket is not None: + return self.bucket.get_acl(self.name, headers=headers) + + def get_xml_acl(self, headers=None): + if self.bucket is not None: + return self.bucket.get_xml_acl(self.name, headers=headers) + + def set_xml_acl(self, acl_str, headers=None): + if self.bucket is not None: + return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) + + def set_canned_acl(self, acl_str, headers=None): + return self.bucket.set_canned_acl(acl_str, self.name, headers) + + def get_redirect(self): + """Return the redirect location configured for this key. + + If no redirect is configured (via set_redirect), then None + will be returned. + + """ + response = self.bucket.connection.make_request( + 'HEAD', self.bucket.name, self.name) + if response.status == 200: + return response.getheader('x-amz-website-redirect-location') + else: + raise self.provider.storage_response_error( + response.status, response.reason, response.read()) + + def set_redirect(self, redirect_location, headers=None): + """Configure this key to redirect to another location. + + When the bucket associated with this key is accessed from the website + endpoint, a 301 redirect will be issued to the specified + `redirect_location`. + + :type redirect_location: string + :param redirect_location: The location to redirect. + + """ + if headers is None: + headers = {} + else: + headers = headers.copy() + + headers['x-amz-website-redirect-location'] = redirect_location + response = self.bucket.connection.make_request('PUT', self.bucket.name, + self.name, headers) + if response.status == 200: + return True + else: + raise self.provider.storage_response_error( + response.status, response.reason, response.read()) + + def make_public(self, headers=None): + return self.bucket.set_canned_acl('public-read', self.name, headers) + + def generate_url(self, expires_in, method='GET', headers=None, + query_auth=True, force_http=False, response_headers=None, + expires_in_absolute=False, version_id=None, + policy=None, reduced_redundancy=False, encrypt_key=False): + """ + Generate a URL to access this key. + + :type expires_in: int + :param expires_in: How long the url is valid for, in seconds. + + :type method: string + :param method: The method to use for retrieving the file + (default is GET). + + :type headers: dict + :param headers: Any headers to pass along in the request. + + :type query_auth: bool + :param query_auth: If True, signs the request in the URL. + + :type force_http: bool + :param force_http: If True, http will be used instead of https. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + + :type expires_in_absolute: bool + :param expires_in_absolute: + + :type version_id: string + :param version_id: The version_id of the object to GET. 
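The redirect helpers above wrap the x-amz-website-redirect-location header; usage is symmetric:

    # Only meaningful when the bucket is served via the S3 website endpoint.
    key.set_redirect('http://example.com/new-location')
    print(key.get_redirect())  # http://example.com/new-location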
If specified + this overrides any value in the key. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the + new key in S3. + + :type reduced_redundancy: bool + :param reduced_redundancy: If True, this will set the storage + class of the new Key to be REDUCED_REDUNDANCY. The Reduced + Redundancy Storage (RRS) feature of S3, provides lower + redundancy at lower storage cost. + + :type encrypt_key: bool + :param encrypt_key: If True, the new copy of the object will + be encrypted on the server-side by S3 and will be stored + in an encrypted form while at rest in S3. + + :rtype: string + :return: The URL to access the key + """ + provider = self.bucket.connection.provider + version_id = version_id or self.version_id + if headers is None: + headers = {} + else: + headers = headers.copy() + + # add headers accordingly (usually PUT case) + if policy: + headers[provider.acl_header] = policy + if reduced_redundancy: + self.storage_class = 'REDUCED_REDUNDANCY' + if provider.storage_class_header: + headers[provider.storage_class_header] = self.storage_class + if encrypt_key: + headers[provider.server_side_encryption_header] = 'AES256' + headers = boto.utils.merge_meta(headers, self.metadata, provider) + + return self.bucket.connection.generate_url(expires_in, method, + self.bucket.name, self.name, + headers, query_auth, + force_http, + response_headers, + expires_in_absolute, + version_id) + + def send_file(self, fp, headers=None, cb=None, num_cb=10, + query_args=None, chunked_transfer=False, size=None): + """ + Upload a file to a key into a bucket on S3. + + :type fp: file + :param fp: The file pointer to upload. The file pointer must + point at the offset from which you wish to upload. + ie. if uploading the full file, it should point at the + start of the file. Normally when a file is opened for + reading, the fp will point at the first byte. See the + bytes parameter below for more info. + + :type headers: dict + :param headers: The headers to pass along with the PUT request + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file + transfer. Providing a negative integer will cause your + callback to be called with each buffer read. + + :type query_args: string + :param query_args: (optional) Arguments to pass in the query string. + + :type chunked_transfer: boolean + :param chunked_transfer: (optional) If true, we use chunked + Transfer-Encoding. + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where you are splitting the file + up into different ranges to be uploaded. If not specified, + the default behaviour is to read all bytes from the file + pointer. Less bytes may be available. 
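generate_url above delegates the signing to the connection; a sketch producing a one-hour pre-signed GET URL, plus an unsigned plain-HTTP variant:

    url = key.generate_url(3600)  # valid for 3600 seconds, query-string auth
    plain = key.generate_url(0, query_auth=False, force_http=True)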
+ """ + self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb, + query_args=query_args, + chunked_transfer=chunked_transfer, size=size) + + def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10, + query_args=None, chunked_transfer=False, size=None, + hash_algs=None): + provider = self.bucket.connection.provider + try: + spos = fp.tell() + except IOError: + spos = None + self.read_from_stream = False + + # If hash_algs is unset and the MD5 hasn't already been computed, + # default to an MD5 hash_alg to hash the data on-the-fly. + if hash_algs is None and not self.md5: + hash_algs = {'md5': md5} + digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {}) + + def sender(http_conn, method, path, data, headers): + # This function is called repeatedly for temporary retries + # so we must be sure the file pointer is pointing at the + # start of the data. + if spos is not None and spos != fp.tell(): + fp.seek(spos) + elif spos is None and self.read_from_stream: + # if seek is not supported, and we've read from this + # stream already, then we need to abort retries to + # avoid setting bad data. + raise provider.storage_data_error( + 'Cannot retry failed request. fp does not support seeking.') + + # If the caller explicitly specified host header, tell putrequest + # not to add a second host header. Similarly for accept-encoding. + skips = {} + if boto.utils.find_matching_headers('host', headers): + skips['skip_host'] = 1 + if boto.utils.find_matching_headers('accept-encoding', headers): + skips['skip_accept_encoding'] = 1 + http_conn.putrequest(method, path, **skips) + for key in headers: + http_conn.putheader(key, headers[key]) + http_conn.endheaders() + + save_debug = self.bucket.connection.debug + self.bucket.connection.debug = 0 + # If the debuglevel < 4 we don't want to show connection + # payload, so turn off HTTP connection-level debug output (to + # be restored below). + # Use the getattr approach to allow this to work in AppEngine. + if getattr(http_conn, 'debuglevel', 0) < 4: + http_conn.set_debuglevel(0) + + data_len = 0 + if cb: + if size: + cb_size = size + elif self.size: + cb_size = self.size + else: + cb_size = 0 + if chunked_transfer and cb_size == 0: + # For chunked Transfer, we call the cb for every 1MB + # of data transferred, except when we know size. + cb_count = (1024 * 1024) / self.BufferSize + elif num_cb > 1: + cb_count = int( + math.ceil(cb_size / self.BufferSize / (num_cb - 1.0))) + elif num_cb < 0: + cb_count = -1 + else: + cb_count = 0 + i = 0 + cb(data_len, cb_size) + + bytes_togo = size + if bytes_togo and bytes_togo < self.BufferSize: + chunk = fp.read(bytes_togo) + else: + chunk = fp.read(self.BufferSize) + + if not isinstance(chunk, bytes): + chunk = chunk.encode('utf-8') + + if spos is None: + # read at least something from a non-seekable fp. 
+                self.read_from_stream = True
+            while chunk:
+                chunk_len = len(chunk)
+                data_len += chunk_len
+                if chunked_transfer:
+                    http_conn.send('%x;\r\n' % chunk_len)
+                    http_conn.send(chunk)
+                    http_conn.send('\r\n')
+                else:
+                    http_conn.send(chunk)
+                for alg in digesters:
+                    digesters[alg].update(chunk)
+                if bytes_togo:
+                    bytes_togo -= chunk_len
+                    if bytes_togo <= 0:
+                        break
+                if cb:
+                    i += 1
+                    if i == cb_count or cb_count == -1:
+                        cb(data_len, cb_size)
+                        i = 0
+                if bytes_togo and bytes_togo < self.BufferSize:
+                    chunk = fp.read(bytes_togo)
+                else:
+                    chunk = fp.read(self.BufferSize)
+
+                if not isinstance(chunk, bytes):
+                    chunk = chunk.encode('utf-8')
+
+            self.size = data_len
+
+            for alg in digesters:
+                self.local_hashes[alg] = digesters[alg].digest()
+
+            if chunked_transfer:
+                http_conn.send('0\r\n')
+                # http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
+                http_conn.send('\r\n')
+
+            if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+                cb(data_len, cb_size)
+
+            http_conn.set_debuglevel(save_debug)
+            self.bucket.connection.debug = save_debug
+            response = http_conn.getresponse()
+            body = response.read()
+
+            if not self.should_retry(response, chunked_transfer):
+                raise provider.storage_response_error(
+                    response.status, response.reason, body)
+
+            return response
+
+        if not headers:
+            headers = {}
+        else:
+            headers = headers.copy()
+        # Overwrite user-supplied user-agent.
+        for header in find_matching_headers('User-Agent', headers):
+            del headers[header]
+        headers['User-Agent'] = UserAgent
+        # If storage_class is None, then a user has not explicitly requested
+        # a storage class, so we can assume STANDARD here
+        if self._storage_class not in [None, 'STANDARD']:
+            headers[provider.storage_class_header] = self.storage_class
+        if find_matching_headers('Content-Encoding', headers):
+            self.content_encoding = merge_headers_by_name(
+                'Content-Encoding', headers)
+        if find_matching_headers('Content-Language', headers):
+            self.content_language = merge_headers_by_name(
+                'Content-Language', headers)
+        content_type_headers = find_matching_headers('Content-Type', headers)
+        if content_type_headers:
+            # Some use cases need to suppress sending of the Content-Type
+            # header and depend on the receiving server to set the content
+            # type. This can be achieved by setting headers['Content-Type']
+            # to None when calling this method.
+            if (len(content_type_headers) == 1 and
+                    headers[content_type_headers[0]] is None):
+                # Delete null Content-Type value to skip sending that header.
+                del headers[content_type_headers[0]]
+            else:
+                self.content_type = merge_headers_by_name(
+                    'Content-Type', headers)
+        elif self.path:
+            self.content_type = mimetypes.guess_type(self.path)[0]
+            if self.content_type is None:
+                self.content_type = self.DefaultContentType
+            headers['Content-Type'] = self.content_type
+        else:
+            headers['Content-Type'] = self.content_type
+        if self.base64md5:
+            headers['Content-MD5'] = self.base64md5
+        if chunked_transfer:
+            headers['Transfer-Encoding'] = 'chunked'
+            # if not self.base64md5:
+            #     headers['Trailer'] = "Content-MD5"
+        else:
+            headers['Content-Length'] = str(self.size)
+        # This is terrible. We need a SHA256 of the body for SigV4, but to do
+        # the chunked ``sender`` behavior above, the ``fp`` isn't available to
+        # the auth mechanism (because closures). Detect if it's SigV4 &
+        # embellish while we can before the auth calculations occur.
+ if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability(): + kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256} + if size is not None: + kwargs['size'] = size + headers['_sha256'] = compute_hash(**kwargs)[0] + headers['Expect'] = '100-Continue' + headers = boto.utils.merge_meta(headers, self.metadata, provider) + resp = self.bucket.connection.make_request( + 'PUT', + self.bucket.name, + self.name, + headers, + sender=sender, + query_args=query_args + ) + self.handle_version_headers(resp, force=True) + self.handle_addl_headers(resp.getheaders()) + + def should_retry(self, response, chunked_transfer=False): + provider = self.bucket.connection.provider + + if not chunked_transfer: + if response.status in [500, 503]: + # 500 & 503 can be plain retries. + return True + + if response.getheader('location'): + # If there's a redirect, plain retry. + return True + + if 200 <= response.status <= 299: + self.etag = response.getheader('etag') + md5 = self.md5 + if isinstance(md5, bytes): + md5 = md5.decode('utf-8') + + # If you use customer-provided encryption keys, the ETag value that + # Amazon S3 returns in the response will not be the MD5 of the + # object. + server_side_encryption_customer_algorithm = response.getheader( + 'x-amz-server-side-encryption-customer-algorithm', None) + if server_side_encryption_customer_algorithm is None: + if self.etag != '"%s"' % md5: + raise provider.storage_data_error( + 'ETag from S3 did not match computed MD5. ' + '%s vs. %s' % (self.etag, self.md5)) + + return True + + if response.status == 400: + # The 400 must be trapped so the retry handler can check to + # see if it was a timeout. + # If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb + # out. + body = response.read() + err = provider.storage_response_error( + response.status, + response.reason, + body + ) + + if err.error_code in ['RequestTimeout']: + raise PleaseRetryException( + "Saw %s, retrying" % err.error_code, + response=response + ) + + return False + + def compute_md5(self, fp, size=None): + """ + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file + pointer will be reset to the same position before the + method returns. + + :type size: int + :param size: (optional) The Maximum number of bytes to read + from the file pointer (fp). This is useful when uploading + a file in multiple parts where the file is being split + in place into different parts. Less bytes may be available. + """ + hex_digest, b64_digest, data_size = compute_md5(fp, size=size) + # Returned values are MD5 hash, base64 encoded MD5 hash, and data size. + # The internal implementation of compute_md5() needs to return the + # data size but we don't want to return that value to the external + # caller because it changes the class interface (i.e. it might + # break some code) so we consume the third tuple value here and + # return the remainder of the tuple to the caller, thereby preserving + # the existing interface. + self.size = data_size + return (hex_digest, b64_digest) + + def set_contents_from_stream(self, fp, headers=None, replace=True, + cb=None, num_cb=10, policy=None, + reduced_redundancy=False, query_args=None, + size=None): + """ + Store an object using the name of the Key object as the key in + cloud and the contents of the data stream pointed to by 'fp' as + the contents. + + The stream object is not seekable and total size is not known. + This has the implication that we can't specify the + Content-Size and Content-MD5 in the header. 
So for huge + uploads, the delay in calculating MD5 is avoided but with a + penalty of inability to verify the integrity of the uploaded + data. + + :type fp: file + :param fp: the file whose contents are to be uploaded + + :type headers: dict + :param headers: additional HTTP headers to be sent with the + PUT request. + + :type replace: bool + :param replace: If this parameter is False, the method will first check + to see if an object exists in the bucket with the same key. If it + does, it won't overwrite it. The default value is True which will + overwrite the object. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to GS and the second representing the + total number of bytes that need to be transmitted. + + :type num_cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter, this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type policy: :class:`boto.gs.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key + in GS. + + :type reduced_redundancy: bool + :param reduced_redundancy: If True, this will set the storage + class of the new Key to be REDUCED_REDUNDANCY. The Reduced + Redundancy Storage (RRS) feature of S3, provides lower + redundancy at lower storage cost. + + :type size: int + :param size: (optional) The Maximum number of bytes to read from + the file pointer (fp). This is useful when uploading a + file in multiple parts where you are splitting the file up + into different ranges to be uploaded. If not specified, + the default behaviour is to read all bytes from the file + pointer. Less bytes may be available. + """ + + provider = self.bucket.connection.provider + if not provider.supports_chunked_transfer(): + raise BotoClientError('%s does not support chunked transfer' + % provider.get_provider_name()) + + # Name of the Object should be specified explicitly for Streams. + if not self.name or self.name == '': + raise BotoClientError('Cannot determine the destination ' + 'object name for the given stream') + + if headers is None: + headers = {} + if policy: + headers[provider.acl_header] = policy + + if reduced_redundancy: + self.storage_class = 'REDUCED_REDUNDANCY' + if provider.storage_class_header: + headers[provider.storage_class_header] = self.storage_class + + if self.bucket is not None: + if not replace: + if self.bucket.lookup(self.name): + return + self.send_file(fp, headers, cb, num_cb, query_args, + chunked_transfer=True, size=size) + + def set_contents_from_file(self, fp, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=False, query_args=None, + encrypt_key=False, size=None, rewind=False): + """ + Store an object in S3 using the name of the Key object as the + key in S3 and the contents of the file pointed to by 'fp' as the + contents. The data is read from 'fp' from its current position until + 'size' bytes have been read or EOF. + + :type fp: file + :param fp: the file whose contents to upload + + :type headers: dict + :param headers: Additional HTTP headers that will be sent with + the PUT request. 
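A sketch of the chunked streaming upload described above; note the guard at the top of set_contents_from_stream, which raises BotoClientError on providers without chunked transfer (plain S3). This therefore assumes a Google Storage connection via boto.connect_gs, and that the gs provider is available alongside this module:

    import boto

    conn = boto.connect_gs()  # chunked transfer is supported by the gs provider
    key = conn.get_bucket('example-bucket').new_key('stream.log')
    with open('/var/log/app.log', 'rb') as fp:
        # No Content-Length/Content-MD5 up front; data goes out chunked.
        key.set_contents_from_stream(fp)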
+
+        :type replace: bool
+        :param replace: If this parameter is False, the method will
+            first check to see if an object exists in the bucket with
+            the same key. If it does, it won't overwrite it. The
+            default value is True which will overwrite the object.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the to be transmitted
+            object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in S3.
+
+        :type md5: tuple
+        :param md5: A tuple containing the hexdigest version of the MD5
+            checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the
+            compute_md5 method. If you need to compute the MD5 for any
+            reason prior to upload, it's silly to have to do it twice so
+            this param, if present, will be used as the MD5 values of the
+            file. Otherwise, the checksum will be computed.
+
+        :type reduced_redundancy: bool
+        :param reduced_redundancy: If True, this will set the storage
+            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+            Redundancy Storage (RRS) feature of S3 provides lower
+            redundancy at lower storage cost.
+
+        :type encrypt_key: bool
+        :param encrypt_key: If True, the new copy of the object will
+            be encrypted on the server-side by S3 and will be stored
+            in an encrypted form while at rest in S3.
+
+        :type size: int
+        :param size: (optional) The maximum number of bytes to read
+            from the file pointer (fp). This is useful when uploading
+            a file in multiple parts where you are splitting the file
+            up into different ranges to be uploaded. If not specified,
+            the default behaviour is to read all bytes from the file
+            pointer. Fewer bytes may be available.
+
+        :type rewind: bool
+        :param rewind: (optional) If True, the file pointer (fp) will
+            be rewound to the start before any bytes are read from
+            it. The default behaviour is False which reads from the
+            current position of the file pointer (fp).
+
+        :rtype: int
+        :return: The number of bytes written to the key.
+        """
+        provider = self.bucket.connection.provider
+        headers = headers or {}
+        if policy:
+            headers[provider.acl_header] = policy
+        if encrypt_key:
+            headers[provider.server_side_encryption_header] = 'AES256'
+
+        if rewind:
+            # caller requests reading from beginning of fp.
+            fp.seek(0, os.SEEK_SET)
+        else:
+            # The following seek/tell/seek logic is intended
+            # to detect applications using the older interface to
+            # set_contents_from_file(), which automatically rewound the
+            # file each time the Key was reused. This changed with commit
+            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
+            # split into multiple parts and uploaded in parallel, and at
+            # the time of that commit this check was added because otherwise
+            # older programs would get a success status and upload an empty
+            # object. Unfortunately, it's very inefficient for fp's implemented
+            # by KeyFile (used, for example, by gsutil when copying between
+            # providers). So, we skip the check for the KeyFile case.
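Pulling the upload path together, a typical set_contents_from_file call; precomputing the md5 tuple with compute_md5 (defined earlier in this file) skips a second read pass:

    import boto

    key = boto.connect_s3().get_bucket('example-bucket').new_key('report.pdf')
    with open('report.pdf', 'rb') as fp:
        md5 = key.compute_md5(fp)  # (hexdigest, base64) tuple; fp position restored
        n = key.set_contents_from_file(fp, md5=md5)
    print('%d bytes written' % n)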
+            # TODO: At some point consider removing this seek/tell/seek
+            # logic, after enough time has passed that it's unlikely any
+            # programs remain that assume the older auto-rewind interface.
+            if not isinstance(fp, KeyFile):
+                spos = fp.tell()
+                fp.seek(0, os.SEEK_END)
+                if fp.tell() == spos:
+                    fp.seek(0, os.SEEK_SET)
+                    if fp.tell() != spos:
+                        # Raise an exception as this is likely a programming
+                        # error whereby there is data before the fp but nothing
+                        # after it.
+                        fp.seek(spos)
+                        raise AttributeError('fp is at EOF. Use rewind option '
+                                             'or seek() to data start.')
+                # seek back to the correct position.
+                fp.seek(spos)
+
+        if reduced_redundancy:
+            self.storage_class = 'REDUCED_REDUNDANCY'
+            if provider.storage_class_header:
+                headers[provider.storage_class_header] = self.storage_class
+                # TODO - What if provider doesn't support reduced redundancy?
+                # What if different providers provide different classes?
+        if hasattr(fp, 'name'):
+            self.path = fp.name
+        if self.bucket is not None:
+            if not md5 and provider.supports_chunked_transfer():
+                # defer md5 calculation to on the fly and
+                # we don't know anything about size yet.
+                chunked_transfer = True
+                self.size = None
+            else:
+                chunked_transfer = False
+                if isinstance(fp, KeyFile):
+                    # Avoid EOF seek for KeyFile case as it's very inefficient.
+                    key = fp.getkey()
+                    size = key.size - fp.tell()
+                    self.size = size
+                    # At present both GCS and S3 use MD5 for the etag for
+                    # non-multipart-uploaded objects. If the etag is 32 hex
+                    # chars use it as an MD5, to avoid having to read the file
+                    # twice while transferring.
+                    if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
+                        etag = key.etag.strip('"')
+                        md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
+                if not md5:
+                    # compute_md5() will also set self.size to the actual
+                    # number of bytes read while computing the md5.
+                    md5 = self.compute_md5(fp, size)
+                    # adjust size if required
+                    size = self.size
+                elif size:
+                    self.size = size
+                else:
+                    # If md5 is provided, we still need the size, so
+                    # calculate it based on bytes to end of content.
+                    spos = fp.tell()
+                    fp.seek(0, os.SEEK_END)
+                    self.size = fp.tell() - spos
+                    fp.seek(spos)
+                    size = self.size
+            self.md5 = md5[0]
+            self.base64md5 = md5[1]
+
+            if self.name is None:
+                self.name = self.md5
+            if not replace:
+                if self.bucket.lookup(self.name):
+                    return
+
+            self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
+                           query_args=query_args,
+                           chunked_transfer=chunked_transfer, size=size)
+            # return number of bytes written.
+            return self.size
+
+    def set_contents_from_filename(self, filename, headers=None, replace=True,
+                                   cb=None, num_cb=10, policy=None, md5=None,
+                                   reduced_redundancy=False,
+                                   encrypt_key=False):
+        """
+        Store an object in S3 using the name of the Key object as the
+        key in S3 and the contents of the file named by 'filename'.
+        See set_contents_from_file method for details about the
+        parameters.
+
+        :type filename: string
+        :param filename: The name of the file that you want to put onto S3
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the
+            request to AWS.
+
+        :type replace: bool
+        :param replace: If True, replaces the contents of the file
+            if it already exists.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the to be transmitted
+            object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in S3.
+
+        :type md5: tuple
+        :param md5: A tuple containing the hexdigest version of the MD5
+            checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the
+            compute_md5 method. If you need to compute the MD5 for any
+            reason prior to upload, it's silly to have to do it twice so
+            this param, if present, will be used as the MD5 values of the
+            file. Otherwise, the checksum will be computed.
+
+        :type reduced_redundancy: bool
+        :param reduced_redundancy: If True, this will set the storage
+            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+            Redundancy Storage (RRS) feature of S3 provides lower
+            redundancy at lower storage cost.
+
+        :type encrypt_key: bool
+        :param encrypt_key: If True, the new copy of the object
+            will be encrypted on the server-side by S3 and will be
+            stored in an encrypted form while at rest in S3.
+
+        :rtype: int
+        :return: The number of bytes written to the key.
+        """
+        with open(filename, 'rb') as fp:
+            return self.set_contents_from_file(fp, headers, replace, cb,
+                                               num_cb, policy, md5,
+                                               reduced_redundancy,
+                                               encrypt_key=encrypt_key)
+
+    def set_contents_from_string(self, string_data, headers=None, replace=True,
+                                 cb=None, num_cb=10, policy=None, md5=None,
+                                 reduced_redundancy=False,
+                                 encrypt_key=False):
+        """
+        Store an object in S3 using the name of the Key object as the
+        key in S3 and the string 's' as the contents.
+        See set_contents_from_file method for details about the
+        parameters.
+
+        :type headers: dict
+        :param headers: Additional headers to pass along with the
+            request to AWS.
+
+        :type replace: bool
+        :param replace: If True, replaces the contents of the file if
+            it already exists.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the to be transmitted
+            object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type policy: :class:`boto.s3.acl.CannedACLStrings`
+        :param policy: A canned ACL policy that will be applied to the
+            new key in S3.
+
+        :type md5: tuple
+        :param md5: A tuple containing the hexdigest version of the MD5
+            checksum of the file as the first element and the
+            Base64-encoded version of the plain checksum as the second
+            element. This is the same format returned by the
+            compute_md5 method. If you need to compute the MD5 for any
+            reason prior to upload, it's silly to have to do it twice so
+            this param, if present, will be used as the MD5 values of the
+            file. Otherwise, the checksum will be computed.
+
+        :type reduced_redundancy: bool
+        :param reduced_redundancy: If True, this will set the storage
+            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
+            Redundancy Storage (RRS) feature of S3 provides lower
+            redundancy at lower storage cost.
+
+        :type encrypt_key: bool
+        :param encrypt_key: If True, the new copy of the object will
+            be encrypted on the server-side by S3 and will be stored
+            in an encrypted form while at rest in S3.
+        """
+        if not isinstance(string_data, bytes):
+            string_data = string_data.encode("utf-8")
+        fp = BytesIO(string_data)
+        r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+                                        policy, md5, reduced_redundancy,
+                                        encrypt_key=encrypt_key)
+        fp.close()
+        return r
+
+    def get_file(self, fp, headers=None, cb=None, num_cb=10,
+                 torrent=False, version_id=None, override_num_retries=None,
+                 response_headers=None):
+        """
+        Retrieves a file from an S3 Key
+
+        :type fp: file
+        :param fp: File pointer to put the data into
+
+        :type headers: dict
+        :param headers: Headers to send when retrieving the file
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the upload. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the to be transmitted
+            object.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: Flag for whether to get a torrent for the file
+
+        :type override_num_retries: int
+        :param override_num_retries: If not None will override configured
+            num_retries parameter for underlying GET.
+
+        :type response_headers: dict
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response. See
+            http://goo.gl/EWOPb for details.
+
+        :type version_id: str
+        :param version_id: The ID of a particular version of the object.
+            If this parameter is not supplied but the Key object has
+            a ``version_id`` attribute, that value will be used when
+            retrieving the object. You can set the Key object's
+            ``version_id`` attribute to None to always grab the latest
+            version from a version-enabled bucket.
+        """
+        self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+                                torrent=torrent, version_id=version_id,
+                                override_num_retries=override_num_retries,
+                                response_headers=response_headers,
+                                hash_algs=None,
+                                query_args=None)
+
+    def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+                           torrent=False, version_id=None,
+                           override_num_retries=None, response_headers=None,
+                           hash_algs=None, query_args=None):
+        if headers is None:
+            headers = {}
+        save_debug = self.bucket.connection.debug
+        if self.bucket.connection.debug == 1:
+            self.bucket.connection.debug = 0
+
+        query_args = query_args or []
+        if torrent:
+            query_args.append('torrent')
+
+        if hash_algs is None and not torrent:
+            hash_algs = {'md5': md5}
+        digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
+        # If a version_id is passed in, use that. If not, check to see
+        # if the Key object has an explicit version_id and, if so, use that.
+        # Otherwise, don't pass a version_id query param.
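And the string convenience wrapper above in one round trip (get_contents_as_string is the matching read helper defined further down in key.py, outside this hunk):

    key.set_contents_from_string(u'héllo')  # encoded to UTF-8 bytes internally
    data = key.get_contents_as_string()     # bytes back from S3
    print(data.decode('utf-8'))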
+        if version_id is None:
+            version_id = self.version_id
+        if version_id:
+            query_args.append('versionId=%s' % version_id)
+        if response_headers:
+            for key in response_headers:
+                query_args.append('%s=%s' % (
+                    key, urllib.parse.quote(response_headers[key])))
+        query_args = '&'.join(query_args)
+        self.open('r', headers, query_args=query_args,
+                  override_num_retries=override_num_retries)
+
+        data_len = 0
+        if cb:
+            if self.size is None:
+                cb_size = 0
+            else:
+                cb_size = self.size
+            if self.size is None and num_cb != -1:
+                # If size is not available due to chunked transfer for example,
+                # we'll call the cb for every 1MB of data transferred.
+                cb_count = (1024 * 1024) / self.BufferSize
+            elif num_cb > 1:
+                cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
+            elif num_cb < 0:
+                cb_count = -1
+            else:
+                cb_count = 0
+            i = 0
+            cb(data_len, cb_size)
+        try:
+            for bytes in self:
+                fp.write(bytes)
+                data_len += len(bytes)
+                for alg in digesters:
+                    digesters[alg].update(bytes)
+                if cb:
+                    if cb_size > 0 and data_len >= cb_size:
+                        break
+                    i += 1
+                    if i == cb_count or cb_count == -1:
+                        cb(data_len, cb_size)
+                        i = 0
+        except IOError as e:
+            if e.errno == errno.ENOSPC:
+                raise StorageDataError('Out of space for destination file '
+                                       '%s' % fp.name)
+            raise
+        if cb and (cb_count <= 1 or i > 0) and data_len > 0:
+            cb(data_len, cb_size)
+        for alg in digesters:
+            self.local_hashes[alg] = digesters[alg].digest()
+        if self.size is None and not torrent and "Range" not in headers:
+            self.size = data_len
+        self.close()
+        self.bucket.connection.debug = save_debug
+
+    def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
+        """
+        Get a torrent file (see get_file)
+
+        :type fp: file
+        :param fp: The file pointer of where to put the torrent
+
+        :type headers: dict
+        :param headers: Headers to be passed
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the download. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the object to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        """
+        return self.get_file(fp, headers, cb, num_cb, torrent=True)
+
+    def get_contents_to_file(self, fp, headers=None,
+                             cb=None, num_cb=10,
+                             torrent=False,
+                             version_id=None,
+                             res_download_handler=None,
+                             response_headers=None):
+        """
+        Retrieve an object from S3 using the name of the Key object as the
+        key in S3. Write the contents of the object to the file pointed
+        to by 'fp'.
+
+        :type fp: file-like object
+        :param fp: The file-like object to write the object's contents to.
+
+        :type headers: dict
+        :param headers: additional HTTP headers that will be sent with
+            the GET request.
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the download. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the object to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: If True, returns the contents of a torrent
+            file as a string.
+
+        :type res_download_handler: ResumableDownloadHandler
+        :param res_download_handler: If provided, this handler will
+            perform the download.
+
+        :type response_headers: dict
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response. See
+            http://goo.gl/EWOPb for details.
+
+        :type version_id: str
+        :param version_id: The ID of a particular version of the object.
+            If this parameter is not supplied but the Key object has
+            a ``version_id`` attribute, that value will be used when
+            retrieving the object. You can set the Key object's
+            ``version_id`` attribute to None to always grab the latest
+            version from a version-enabled bucket.
+        """
+        if self.bucket is not None:
+            if res_download_handler:
+                res_download_handler.get_file(self, fp, headers, cb, num_cb,
+                                              torrent=torrent,
+                                              version_id=version_id)
+            else:
+                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
+                              version_id=version_id,
+                              response_headers=response_headers)
+
+    def get_contents_to_filename(self, filename, headers=None,
+                                 cb=None, num_cb=10,
+                                 torrent=False,
+                                 version_id=None,
+                                 res_download_handler=None,
+                                 response_headers=None):
+        """
+        Retrieve an object from S3 using the name of the Key object as the
+        key in S3. Store contents of the object to a file named by 'filename'.
+        See get_contents_to_file method for details about the
+        parameters.
+
+        :type filename: string
+        :param filename: The filename of where to put the file contents
+
+        :type headers: dict
+        :param headers: Any additional headers to send in the request
+
+        :type cb: function
+        :param cb: a callback function that will be called to report
+            progress on the download. The callback should accept two
+            integer parameters, the first representing the number of
+            bytes that have been successfully transmitted to S3 and
+            the second representing the size of the object to be
+            transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the
+            cb parameter this parameter determines the granularity of
+            the callback by defining the maximum number of times the
+            callback will be called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: If True, returns the contents of a torrent file
+            as a string.
+
+        :type res_download_handler: ResumableDownloadHandler
+        :param res_download_handler: If provided, this handler will
+            perform the download.
+
+        :type response_headers: dict
+        :param response_headers: A dictionary containing HTTP
+            headers/values that will override any headers associated
+            with the stored object in the response. See
+            http://goo.gl/EWOPb for details.
+
+        :type version_id: str
+        :param version_id: The ID of a particular version of the object.
+            If this parameter is not supplied but the Key object has
+            a ``version_id`` attribute, that value will be used when
+            retrieving the object. You can set the Key object's
+            ``version_id`` attribute to None to always grab the latest
+            version from a version-enabled bucket.
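And the corresponding download path, sketched under the same assumptions (hypothetical names, standard boto credentials):

    from boto.s3.connection import S3Connection

    conn = S3Connection()
    key = conn.get_bucket('example-bucket').get_key('greeting.txt')
    # Writes the object's contents to a local file and, where possible,
    # mirrors the S3 last-modified timestamp onto it.
    key.get_contents_to_filename('/tmp/greeting.txt')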
+ """ + try: + with open(filename, 'wb') as fp: + self.get_contents_to_file(fp, headers, cb, num_cb, + torrent=torrent, + version_id=version_id, + res_download_handler=res_download_handler, + response_headers=response_headers) + except Exception: + os.remove(filename) + raise + # if last_modified date was sent from s3, try to set file's timestamp + if self.last_modified is not None: + try: + modified_tuple = email.utils.parsedate_tz(self.last_modified) + modified_stamp = int(email.utils.mktime_tz(modified_tuple)) + os.utime(fp.name, (modified_stamp, modified_stamp)) + except Exception: + pass + + def get_contents_as_string(self, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None, + response_headers=None, encoding=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Return the contents of the object as a string. + See get_contents_to_file method for details about the + parameters. + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two + integer parameters, the first representing the number of + bytes that have been successfully transmitted to S3 and + the second representing the size of the to be transmitted + object. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the + cb parameter this parameter determines the granularity of + the callback by defining the maximum number of times the + callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file + as a string. + + :type response_headers: dict + :param response_headers: A dictionary containing HTTP + headers/values that will override any headers associated + with the stored object in the response. See + http://goo.gl/EWOPb for details. + + :type version_id: str + :param version_id: The ID of a particular version of the object. + If this parameter is not supplied but the Key object has + a ``version_id`` attribute, that value will be used when + retrieving the object. You can set the Key object's + ``version_id`` attribute to None to always grab the latest + version from a version-enabled bucket. + + :type encoding: str + :param encoding: The text encoding to use, such as ``utf-8`` + or ``iso-8859-1``. If set, then a string will be returned. + Defaults to ``None`` and returns bytes. + + :rtype: bytes or str + :returns: The contents of the file as bytes or a string + """ + fp = BytesIO() + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id, + response_headers=response_headers) + value = fp.getvalue() + + if encoding is not None: + value = value.decode(encoding) + + return value + + def add_email_grant(self, permission, email_address, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a key. This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the AWS + account your are granting the permission to. 
+        """
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_email_grant(permission, email_address)
+        self.set_acl(policy, headers=headers)
+
+    def add_user_grant(self, permission, user_id, headers=None,
+                       display_name=None):
+        """
+        Convenience method that provides a quick way to add a canonical
+        user grant to a key. This method retrieves the current ACL,
+        creates a new grant based on the parameters passed in, adds that
+        grant to the ACL and then PUTs the new ACL back to S3.
+
+        :type permission: string
+        :param permission: The permission being granted. Should be one of:
+            (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+        :type user_id: string
+        :param user_id: The canonical user id associated with the AWS
+            account you are granting the permission to.
+
+        :type display_name: string
+        :param display_name: An optional string containing the user's
+            Display Name. Only required on Walrus.
+        """
+        policy = self.get_acl(headers=headers)
+        policy.acl.add_user_grant(permission, user_id,
+                                  display_name=display_name)
+        self.set_acl(policy, headers=headers)
+
+    def _normalize_metadata(self, metadata):
+        if type(metadata) == set:
+            norm_metadata = set()
+            for k in metadata:
+                norm_metadata.add(k.lower())
+        else:
+            norm_metadata = {}
+            for k in metadata:
+                norm_metadata[k.lower()] = metadata[k]
+        return norm_metadata
+
+    def _get_remote_metadata(self, headers=None):
+        """
+        Extracts metadata from existing URI into a dict, so we can
+        overwrite/delete from it to form the new set of metadata to apply to a
+        key.
+        """
+        metadata = {}
+        for underscore_name in self._underscore_base_user_settable_fields:
+            if hasattr(self, underscore_name):
+                value = getattr(self, underscore_name)
+                if value:
+                    # Generate HTTP field name corresponding to "_" named field.
+                    field_name = underscore_name.replace('_', '-')
+                    metadata[field_name.lower()] = value
+        # self.metadata contains custom metadata, which are all user-settable.
+        prefix = self.provider.metadata_prefix
+        for underscore_name in self.metadata:
+            field_name = underscore_name.replace('_', '-')
+            metadata['%s%s' % (prefix, field_name.lower())] = (
+                self.metadata[underscore_name])
+        return metadata
+
+    def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
+                            headers=None):
+        metadata_plus = self._normalize_metadata(metadata_plus)
+        metadata_minus = self._normalize_metadata(metadata_minus)
+        metadata = self._get_remote_metadata()
+        metadata.update(metadata_plus)
+        for h in metadata_minus:
+            if h in metadata:
+                del metadata[h]
+        src_bucket = self.bucket
+        # Boto prepends the meta prefix when adding headers, so strip prefix in
+        # metadata before sending back in to copy_key() call.
+        rewritten_metadata = {}
+        for h in metadata:
+            if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
+                rewritten_h = (h.replace('x-goog-meta-', '')
+                               .replace('x-amz-meta-', ''))
+            else:
+                rewritten_h = h
+            rewritten_metadata[rewritten_h] = metadata[h]
+        metadata = rewritten_metadata
+        src_bucket.copy_key(self.name, self.bucket.name, self.name,
+                            metadata=metadata, preserve_acl=preserve_acl,
+                            headers=headers)
+
+    def restore(self, days, headers=None):
+        """Restore an object from an archive.
+
+        :type days: int
+        :param days: The lifetime of the restored object (must
+            be at least 1 day). If the object is already restored
+            then this parameter can be used to readjust the lifetime
+            of the restored object. In this case, the days
+            param is with respect to the initial time of the request.
+            If the object has not been restored, this param is with
+            respect to the completion time of the request.
+
+        """
+        response = self.bucket.connection.make_request(
+            'POST', self.bucket.name, self.name,
+            data=self.RestoreBody % days,
+            headers=headers, query_args='restore')
+        if response.status not in (200, 202):
+            provider = self.bucket.connection.provider
+            raise provider.storage_response_error(response.status,
+                                                  response.reason,
+                                                  response.read())
diff --git a/ext/boto/s3/keyfile.py b/ext/boto/s3/keyfile.py
new file mode 100644
index 0000000000..4245413d74
--- /dev/null
+++ b/ext/boto/s3/keyfile.py
@@ -0,0 +1,134 @@
+# Copyright 2013 Google Inc.
+# Copyright 2011, Nexenta Systems Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Wrapper class to expose a Key being read via a partial implementation of the
+Python file interface. The only functions supported are those needed for seeking
+in a Key open for reading.
+"""
+
+import os
+from boto.exception import StorageResponseError
+
+class KeyFile():
+
+    def __init__(self, key):
+        self.key = key
+        self.key.open_read()
+        self.location = 0
+        self.closed = False
+        self.softspace = -1  # Not implemented.
+        self.mode = 'r'
+        self.encoding = 'Undefined in KeyFile'
+        self.errors = 'Undefined in KeyFile'
+        self.newlines = 'Undefined in KeyFile'
+        self.name = key.name
+
+    def tell(self):
+        if self.location is None:
+            raise ValueError("I/O operation on closed file")
+        return self.location
+
+    def seek(self, pos, whence=os.SEEK_SET):
+        self.key.close(fast=True)
+        if whence == os.SEEK_END:
+            # We need special handling for this case because sending an HTTP range GET
+            # with EOF for the range start would cause an invalid range error.
Instead + # we position to one before EOF (plus pos) and then read one byte to + # position at EOF. + if self.key.size == 0: + # Don't try to seek with an empty key. + return + pos = self.key.size + pos - 1 + if pos < 0: + raise IOError("Invalid argument") + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + self.key.read(1) + self.location = pos + 1 + return + + if whence == os.SEEK_SET: + if pos < 0: + raise IOError("Invalid argument") + elif whence == os.SEEK_CUR: + pos += self.location + else: + raise IOError('Invalid whence param (%d) passed to seek' % whence) + try: + self.key.open_read(headers={"Range": "bytes=%d-" % pos}) + except StorageResponseError as e: + # 416 Invalid Range means that the given starting byte was past the end + # of file. We catch this because the Python file interface allows silently + # seeking past the end of the file. + if e.status != 416: + raise + + self.location = pos + + def read(self, size): + self.location += size + return self.key.read(size) + + def close(self): + self.key.close() + self.location = None + self.closed = True + + def isatty(self): + return False + + # Non-file interface, useful for code that wants to dig into underlying Key + # state. + def getkey(self): + return self.key + + # Unimplemented interfaces below here. + + def write(self, buf): + raise NotImplementedError('write not implemented in KeyFile') + + def fileno(self): + raise NotImplementedError('fileno not implemented in KeyFile') + + def flush(self): + raise NotImplementedError('flush not implemented in KeyFile') + + def next(self): + raise NotImplementedError('next not implemented in KeyFile') + + def readinto(self): + raise NotImplementedError('readinto not implemented in KeyFile') + + def readline(self): + raise NotImplementedError('readline not implemented in KeyFile') + + def readlines(self): + raise NotImplementedError('readlines not implemented in KeyFile') + + def truncate(self): + raise NotImplementedError('truncate not implemented in KeyFile') + + def writelines(self): + raise NotImplementedError('writelines not implemented in KeyFile') + + def xreadlines(self): + raise NotImplementedError('xreadlines not implemented in KeyFile') diff --git a/ext/boto/s3/lifecycle.py b/ext/boto/s3/lifecycle.py new file mode 100644 index 0000000000..bd8645965c --- /dev/null +++ b/ext/boto/s3/lifecycle.py @@ -0,0 +1,311 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from boto.compat import six
+
+class Rule(object):
+    """
+    A Lifecycle rule for an S3 bucket.
+
+    :ivar id: Unique identifier for the rule. The value cannot be longer
+        than 255 characters. This value is optional. The server will
+        generate a unique value for the rule if no value is provided.
+
+    :ivar prefix: Prefix identifying one or more objects to which the
+        rule applies. If prefix is not provided, Boto generates a default
+        prefix which will match all objects.
+
+    :ivar status: If 'Enabled', the rule is currently being applied.
+        If 'Disabled', the rule is not currently being applied.
+
+    :ivar expiration: An instance of `Expiration`. This indicates
+        the lifetime of the objects that are subject to the rule.
+
+    :ivar transition: An instance of `Transition`. This indicates
+        when to transition to a different storage class.
+
+    """
+    def __init__(self, id=None, prefix=None, status=None, expiration=None,
+                 transition=None):
+        self.id = id
+        self.prefix = '' if prefix is None else prefix
+        self.status = status
+        if isinstance(expiration, six.integer_types):
+            # retain backwards compatibility???
+            self.expiration = Expiration(days=expiration)
+        else:
+            # None or object
+            self.expiration = expiration
+
+        # retain backwards compatibility
+        if isinstance(transition, Transition):
+            self.transition = Transitions()
+            self.transition.append(transition)
+        elif transition:
+            self.transition = transition
+        else:
+            self.transition = Transitions()
+
+    def __repr__(self):
+        return '<Rule: %s>' % self.id
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Transition':
+            return self.transition
+        elif name == 'Expiration':
+            self.expiration = Expiration()
+            return self.expiration
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ID':
+            self.id = value
+        elif name == 'Prefix':
+            self.prefix = value
+        elif name == 'Status':
+            self.status = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self):
+        s = '<Rule>'
+        if self.id is not None:
+            s += '<ID>%s</ID>' % self.id
+        s += '<Prefix>%s</Prefix>' % self.prefix
+        s += '<Status>%s</Status>' % self.status
+        if self.expiration is not None:
+            s += self.expiration.to_xml()
+        if self.transition is not None:
+            s += self.transition.to_xml()
+        s += '</Rule>'
+        return s
+
+class Expiration(object):
+    """
+    When an object will expire.
+
+    :ivar days: The number of days until the object expires
+
+    :ivar date: The date when the object will expire. Must be
+        in ISO 8601 format.
+    """
+    def __init__(self, days=None, date=None):
+        self.days = days
+        self.date = date
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Days':
+            self.days = int(value)
+        elif name == 'Date':
+            self.date = value
+
+    def __repr__(self):
+        if self.days is None:
+            how_long = "on: %s" % self.date
+        else:
+            how_long = "in: %s days" % self.days
+        return '<Expiration: %s>' % how_long
+
+    def to_xml(self):
+        s = '<Expiration>'
+        if self.days is not None:
+            s += '<Days>%s</Days>' % self.days
+        elif self.date is not None:
+            s += '<Date>%s</Date>' % self.date
+        s += '</Expiration>'
+        return s
+
+class Transition(object):
+    """
+    A transition to a different storage class.
+
+    :ivar days: The number of days until the object should be moved.
+
+    :ivar date: The date when the object should be moved. Should be
+        in ISO 8601 format.
+
+    :ivar storage_class: The storage class to transition to. Valid
+        values are GLACIER, STANDARD_IA.
+    """
+    def __init__(self, days=None, date=None, storage_class=None):
+        self.days = days
+        self.date = date
+        self.storage_class = storage_class
+
+    def __repr__(self):
+        if self.days is None:
+            how_long = "on: %s" % self.date
+        else:
+            how_long = "in: %s days" % self.days
+        return '<Transition: %s, %s>' % (how_long, self.storage_class)
+
+    def to_xml(self):
+        s = '<Transition>'
+        s += '<StorageClass>%s</StorageClass>' % self.storage_class
+        if self.days is not None:
+            s += '<Days>%s</Days>' % self.days
+        elif self.date is not None:
+            s += '<Date>%s</Date>' % self.date
+        s += '</Transition>'
+        return s
+
+class Transitions(list):
+    """
+    A container for the transitions associated with a Lifecycle's Rule configuration.
+    """
+    def __init__(self):
+        self.transition_properties = 3
+        self.current_transition_property = 1
+        self.temp_days = None
+        self.temp_date = None
+        self.temp_storage_class = None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Days':
+            self.temp_days = int(value)
+        elif name == 'Date':
+            self.temp_date = value
+        elif name == 'StorageClass':
+            self.temp_storage_class = value
+
+        # the XML does not contain a <Transitions> tag
+        # but rather N number of <Transition> tags not
+        # structured in any sort of hierarchy.
+        if self.current_transition_property == self.transition_properties:
+            self.append(Transition(self.temp_days, self.temp_date, self.temp_storage_class))
+            self.temp_days = self.temp_date = self.temp_storage_class = None
+            self.current_transition_property = 1
+        else:
+            self.current_transition_property += 1
+
+    def to_xml(self):
+        """
+        Returns a string containing the XML version of the Lifecycle
+        configuration as defined by S3.
+        """
+        s = ''
+        for transition in self:
+            s += transition.to_xml()
+        return s
+
+    def add_transition(self, days=None, date=None, storage_class=None):
+        """
+        Add a transition to this Lifecycle configuration. This only adds
+        the rule to the local copy. To install the new rule(s) on
+        the bucket, you need to pass this Lifecycle config object
+        to the configure_lifecycle method of the Bucket object.
+
+        :ivar days: The number of days until the object should be moved.
+
+        :ivar date: The date when the object should be moved. Should be
+            in ISO 8601 format.
+
+        :ivar storage_class: The storage class to transition to. Valid
+            values are GLACIER, STANDARD_IA.
+        """
+        transition = Transition(days, date, storage_class)
+        self.append(transition)
+
+    def __first_or_default(self, prop):
+        for transition in self:
+            return getattr(transition, prop)
+        return None
+
+    # maintain backwards compatibility so that we can continue utilizing
+    # 'rule.transition.days' syntax
+    @property
+    def days(self):
+        return self.__first_or_default('days')
+
+    @property
+    def date(self):
+        return self.__first_or_default('date')
+
+    @property
+    def storage_class(self):
+        return self.__first_or_default('storage_class')
+
+
+class Lifecycle(list):
+    """
+    A container for the rules associated with a Lifecycle configuration.
+    """
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Rule':
+            rule = Rule()
+            self.append(rule)
+            return rule
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        """
+        Returns a string containing the XML version of the Lifecycle
+        configuration as defined by S3.
+ """ + s = '' + s += '' + for rule in self: + s += rule.to_xml() + s += '' + return s + + def add_rule(self, id=None, prefix='', status='Enabled', + expiration=None, transition=None): + """ + Add a rule to this Lifecycle configuration. This only adds + the rule to the local copy. To install the new rule(s) on + the bucket, you need to pass this Lifecycle config object + to the configure_lifecycle method of the Bucket object. + + :type id: str + :param id: Unique identifier for the rule. The value cannot be longer + than 255 characters. This value is optional. The server will + generate a unique value for the rule if no value is provided. + + :type prefix: str + :iparam prefix: Prefix identifying one or more objects to which the + rule applies. + + :type status: str + :param status: If 'Enabled', the rule is currently being applied. + If 'Disabled', the rule is not currently being applied. + + :type expiration: int + :param expiration: Indicates the lifetime, in days, of the objects + that are subject to the rule. The value must be a non-zero + positive integer. A Expiration object instance is also perfect. + + :type transition: Transitions + :param transition: Indicates when an object transitions to a + different storage class. + """ + rule = Rule(id, prefix, status, expiration, transition) + self.append(rule) diff --git a/ext/boto/s3/multidelete.py b/ext/boto/s3/multidelete.py new file mode 100644 index 0000000000..3e2d48e32d --- /dev/null +++ b/ext/boto/s3/multidelete.py @@ -0,0 +1,138 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto import handler +import xml.sax + +class Deleted(object): + """ + A successfully deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was deleted. + + :ivar version_id: Version id of the object that was deleted. + + :ivar delete_marker: If True, indicates the object deleted + was a DeleteMarker. + + :ivar delete_marker_version_id: Version ID of the delete marker + deleted. 
+ """ + def __init__(self, key=None, version_id=None, + delete_marker=False, delete_marker_version_id=None): + self.key = key + self.version_id = version_id + self.delete_marker = delete_marker + self.delete_marker_version_id = delete_marker_version_id + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id) + else: + return '' % self.key + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'DeleteMarker': + if value.lower() == 'true': + self.delete_marker = True + elif name == 'DeleteMarkerVersionId': + self.delete_marker_version_id = value + else: + setattr(self, name, value) + +class Error(object): + """ + An unsuccessful deleted object in a multi-object delete request. + + :ivar key: Key name of the object that was not deleted. + + :ivar version_id: Version id of the object that was not deleted. + + :ivar code: Status code of the failed delete operation. + + :ivar message: Status message of the failed delete operation. + """ + def __init__(self, key=None, version_id=None, + code=None, message=None): + self.key = key + self.version_id = version_id + self.code = code + self.message = message + + def __repr__(self): + if self.version_id: + return '' % (self.key, self.version_id, + self.code) + else: + return '' % (self.key, self.code) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.key = value + elif name == 'VersionId': + self.version_id = value + elif name == 'Code': + self.code = value + elif name == 'Message': + self.message = value + else: + setattr(self, name, value) + +class MultiDeleteResult(object): + """ + The status returned from a MultiObject Delete request. + + :ivar deleted: A list of successfully deleted objects. Note that if + the quiet flag was specified in the request, this list will + be empty because only error responses would be returned. + + :ivar errors: A list of unsuccessfully deleted objects. + """ + + def __init__(self, bucket=None): + self.bucket = None + self.deleted = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name == 'Deleted': + d = Deleted() + self.deleted.append(d) + return d + elif name == 'Error': + e = Error() + self.errors.append(e) + return e + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + diff --git a/ext/boto/s3/multipart.py b/ext/boto/s3/multipart.py new file mode 100644 index 0000000000..056f9ca52a --- /dev/null +++ b/ext/boto/s3/multipart.py @@ -0,0 +1,330 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3 import user
+from boto.s3 import key
+from boto import handler
+import xml.sax
+
+
+class CompleteMultiPartUpload(object):
+    """
+    Represents a completed MultiPart Upload. Contains the
+    following useful attributes:
+
+    * location - The URI of the completed upload
+    * bucket_name - The name of the bucket in which the upload
+      is contained
+    * key_name - The name of the new, completed key
+    * etag - The MD5 hash of the completed, combined upload
+    * version_id - The version_id of the completed upload
+    * encrypted - The value of the encryption header
+    """
+
+    def __init__(self, bucket=None):
+        self.bucket = bucket
+        self.location = None
+        self.bucket_name = None
+        self.key_name = None
+        self.etag = None
+        self.version_id = None
+        self.encrypted = None
+
+    def __repr__(self):
+        return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
+                                                     self.key_name)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Location':
+            self.location = value
+        elif name == 'Bucket':
+            self.bucket_name = value
+        elif name == 'Key':
+            self.key_name = value
+        elif name == 'ETag':
+            self.etag = value
+        else:
+            setattr(self, name, value)
+
+
+class Part(object):
+    """
+    Represents a single part in a MultiPart upload.
+    Attributes include:
+
+    * part_number - The integer part number
+    * last_modified - The last modified date of this part
+    * etag - The MD5 hash of this part
+    * size - The size, in bytes, of this part
+    """
+
+    def __init__(self, bucket=None):
+        self.bucket = bucket
+        self.part_number = None
+        self.last_modified = None
+        self.etag = None
+        self.size = None
+
+    def __repr__(self):
+        if isinstance(self.part_number, int):
+            return '<Part %d>' % self.part_number
+        else:
+            return '<Part %s>' % None
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'PartNumber':
+            self.part_number = int(value)
+        elif name == 'LastModified':
+            self.last_modified = value
+        elif name == 'ETag':
+            self.etag = value
+        elif name == 'Size':
+            self.size = int(value)
+        else:
+            setattr(self, name, value)
+
+
+def part_lister(mpupload, part_number_marker=None):
+    """
+    A generator function for listing parts of a multipart upload.
+    """
+    more_results = True
+    part = None
+    while more_results:
+        parts = mpupload.get_all_parts(None, part_number_marker)
+        for part in parts:
+            yield part
+        part_number_marker = mpupload.next_part_number_marker
+        more_results = mpupload.is_truncated
+
+
+class MultiPartUpload(object):
+    """
+    Represents a MultiPart Upload operation.
+ """ + + def __init__(self, bucket=None): + self.bucket = bucket + self.bucket_name = None + self.key_name = None + self.id = id + self.initiator = None + self.owner = None + self.storage_class = None + self.initiated = None + self.part_number_marker = None + self.next_part_number_marker = None + self.max_parts = None + self.is_truncated = False + self._parts = None + + def __repr__(self): + return '' % self.key_name + + def __iter__(self): + return part_lister(self) + + def to_xml(self): + s = '\n' + for part in self: + s += ' \n' + s += ' %d\n' % part.part_number + s += ' %s\n' % part.etag + s += ' \n' + s += '' + return s + + def startElement(self, name, attrs, connection): + if name == 'Initiator': + self.initiator = user.User(self) + return self.initiator + elif name == 'Owner': + self.owner = user.User(self) + return self.owner + elif name == 'Part': + part = Part(self.bucket) + self._parts.append(part) + return part + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket_name = value + elif name == 'Key': + self.key_name = value + elif name == 'UploadId': + self.id = value + elif name == 'StorageClass': + self.storage_class = value + elif name == 'PartNumberMarker': + self.part_number_marker = value + elif name == 'NextPartNumberMarker': + self.next_part_number_marker = value + elif name == 'MaxParts': + self.max_parts = int(value) + elif name == 'IsTruncated': + if value == 'true': + self.is_truncated = True + else: + self.is_truncated = False + elif name == 'Initiated': + self.initiated = value + else: + setattr(self, name, value) + + def get_all_parts(self, max_parts=None, part_number_marker=None, + encoding_type=None): + """ + Return the uploaded parts of this MultiPart Upload. This is + a lower-level method that requires you to manually page through + results. To simplify this process, you can just use the + object itself as an iterator and it will automatically handle + all of the paging with S3. + """ + self._parts = [] + query_args = 'uploadId=%s' % self.id + if max_parts: + query_args += '&max-parts=%d' % max_parts + if part_number_marker: + query_args += '&part-number-marker=%s' % part_number_marker + if encoding_type: + query_args += '&encoding-type=%s' % encoding_type + response = self.bucket.connection.make_request('GET', self.bucket.name, + self.key_name, + query_args=query_args) + body = response.read() + if response.status == 200: + h = handler.XmlHandler(self, self) + xml.sax.parseString(body, h) + return self._parts + + def upload_part_from_file(self, fp, part_num, headers=None, replace=True, + cb=None, num_cb=10, md5=None, size=None): + """ + Upload another part of this MultiPart Upload. + + .. note:: + + After you initiate multipart upload and upload one or more parts, + you must either complete or abort multipart upload in order to stop + getting charged for storage of the uploaded parts. Only after you + either complete or abort multipart upload, Amazon S3 frees up the + parts storage and stops charging you for the parts storage. + + :type fp: file + :param fp: The file object you want to upload. + + :type part_num: int + :param part_num: The number of this part. + + The other parameters are exactly as defined for the + :class:`boto.s3.key.Key` set_contents_from_file method. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: The uploaded part containing the etag. 
+ """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + key = self.bucket.new_key(self.key_name) + key.set_contents_from_file(fp, headers=headers, replace=replace, + cb=cb, num_cb=num_cb, md5=md5, + reduced_redundancy=False, + query_args=query_args, size=size) + return key + + def copy_part_from_key(self, src_bucket_name, src_key_name, part_num, + start=None, end=None, src_version_id=None, + headers=None): + """ + Copy another part of this MultiPart Upload. + + :type src_bucket_name: string + :param src_bucket_name: Name of the bucket containing the source key + + :type src_key_name: string + :param src_key_name: Name of the source key + + :type part_num: int + :param part_num: The number of this part. + + :type start: int + :param start: Zero-based byte offset to start copying from + + :type end: int + :param end: Zero-based byte offset to copy to + + :type src_version_id: string + :param src_version_id: version_id of source object to copy from + + :type headers: dict + :param headers: Any headers to pass along in the request + """ + if part_num < 1: + raise ValueError('Part numbers must be greater than zero') + query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num) + if start is not None and end is not None: + rng = 'bytes=%s-%s' % (start, end) + provider = self.bucket.connection.provider + if headers is None: + headers = {} + else: + headers = headers.copy() + headers[provider.copy_source_range_header] = rng + return self.bucket.copy_key(self.key_name, src_bucket_name, + src_key_name, + src_version_id=src_version_id, + storage_class=None, + headers=headers, + query_args=query_args) + + def complete_upload(self): + """ + Complete the MultiPart Upload operation. This method should + be called when all parts of the file have been successfully + uploaded to S3. + + :rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload` + :returns: An object representing the completed upload. + """ + xml = self.to_xml() + return self.bucket.complete_multipart_upload(self.key_name, + self.id, xml) + + def cancel_upload(self): + """ + Cancels a MultiPart Upload operation. The storage consumed by + any previously uploaded parts will be freed. However, if any + part uploads are currently in progress, those part uploads + might or might not succeed. As a result, it might be necessary + to abort a given multipart upload multiple times in order to + completely free all storage consumed by all parts. + """ + self.bucket.cancel_multipart_upload(self.key_name, self.id) diff --git a/ext/boto/s3/prefix.py b/ext/boto/s3/prefix.py new file mode 100644 index 0000000000..adf28e935f --- /dev/null +++ b/ext/boto/s3/prefix.py @@ -0,0 +1,42 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class Prefix(object):
+    def __init__(self, bucket=None, name=None):
+        self.bucket = bucket
+        self.name = name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Prefix':
+            self.name = value
+        else:
+            setattr(self, name, value)
+
+    @property
+    def provider(self):
+        provider = None
+        if self.bucket and self.bucket.connection:
+            provider = self.bucket.connection.provider
+        return provider
+
diff --git a/ext/boto/s3/resumable_download_handler.py b/ext/boto/s3/resumable_download_handler.py
new file mode 100644
index 0000000000..a2a88c74cd
--- /dev/null
+++ b/ext/boto/s3/resumable_download_handler.py
@@ -0,0 +1,352 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import errno
+import httplib
+import os
+import re
+import socket
+import time
+import boto
+from boto import config, storage_uri_for_key
+from boto.connection import AWSAuthConnection
+from boto.exception import ResumableDownloadException
+from boto.exception import ResumableTransferDisposition
+from boto.s3.keyfile import KeyFile
+from boto.gs.key import Key as GSKey
+
+"""
+Resumable download handler.
+
+Resumable downloads will retry failed downloads, resuming at the byte count
+completed by the last download attempt. If too many retries happen with no
+progress (per configurable num_retries param), the download will be aborted.
+
+The caller can optionally specify a tracker_file_name param in the
+ResumableDownloadHandler constructor. If you do this, that file will
+save the state needed to allow retrying later, in a separate process
+(e.g., in a later run of gsutil).
+
+Note that resumable downloads work across providers (they depend only
+on support for Range GETs), but this code is in the boto.s3 package
+because it is the wrong abstraction level to go in the top-level boto
+package.
+
+TODO: At some point we should refactor the code to have a storage_service
+package where all these provider-independent files go.
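In practice the handler is passed to Key.get_contents_to_filename via the res_download_handler parameter documented earlier in this patch; a sketch, with hypothetical paths:

    from boto.s3.connection import S3Connection
    from boto.s3.resumable_download_handler import ResumableDownloadHandler

    conn = S3Connection()
    key = conn.get_bucket('example-bucket').get_key('big-file.bin')
    # The tracker file lets a later process resume an interrupted download.
    handler = ResumableDownloadHandler(tracker_file_name='/tmp/big-file.tracker',
                                       num_retries=6)
    key.get_contents_to_filename('/tmp/big-file.bin',
                                 res_download_handler=handler)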
+""" + + +class ByteTranslatingCallbackHandler(object): + """ + Proxy class that translates progress callbacks made by + boto.s3.Key.get_file(), taking into account that we're resuming + a download. + """ + def __init__(self, proxied_cb, download_start_point): + self.proxied_cb = proxied_cb + self.download_start_point = download_start_point + + def call(self, total_bytes_uploaded, total_size): + self.proxied_cb(self.download_start_point + total_bytes_uploaded, + total_size) + + +def get_cur_file_size(fp, position_to_eof=False): + """ + Returns size of file, optionally leaving fp positioned at EOF. + """ + if isinstance(fp, KeyFile) and not position_to_eof: + # Avoid EOF seek for KeyFile case as it's very inefficient. + return fp.getkey().size + if not position_to_eof: + cur_pos = fp.tell() + fp.seek(0, os.SEEK_END) + cur_file_size = fp.tell() + if not position_to_eof: + fp.seek(cur_pos, os.SEEK_SET) + return cur_file_size + + +class ResumableDownloadHandler(object): + """ + Handler for resumable downloads. + """ + + MIN_ETAG_LEN = 5 + + RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error, + socket.gaierror) + + def __init__(self, tracker_file_name=None, num_retries=None): + """ + Constructor. Instantiate once for each downloaded file. + + :type tracker_file_name: string + :param tracker_file_name: optional file name to save tracking info + about this download. If supplied and the current process fails + the download, it can be retried in a new process. If called + with an existing file containing an unexpired timestamp, + we'll resume the transfer for this file; else we'll start a + new resumable download. + + :type num_retries: int + :param num_retries: the number of times we'll re-try a resumable + download making no progress. (Count resets every time we get + progress, so download can span many more than this number of + retries.) + """ + self.tracker_file_name = tracker_file_name + self.num_retries = num_retries + self.etag_value_for_current_download = None + if tracker_file_name: + self._load_tracker_file_etag() + # Save download_start_point in instance state so caller can + # find how much was transferred by this ResumableDownloadHandler + # (across retries). + self.download_start_point = None + + def _load_tracker_file_etag(self): + f = None + try: + f = open(self.tracker_file_name, 'r') + self.etag_value_for_current_download = f.readline().rstrip('\n') + # We used to match an MD5-based regex to ensure that the etag was + # read correctly. Since ETags need not be MD5s, we now do a simple + # length sanity check instead. + if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN: + print('Couldn\'t read etag in tracker file (%s). Restarting ' + 'download from scratch.' % self.tracker_file_name) + except IOError as e: + # Ignore non-existent file (happens first time a download + # is attempted on an object), but warn user for other errors. + if e.errno != errno.ENOENT: + # Will restart because + # self.etag_value_for_current_download is None. + print('Couldn\'t read URI tracker file (%s): %s. Restarting ' + 'download from scratch.' 
%
+                      (self.tracker_file_name, e.strerror))
+        finally:
+            if f:
+                f.close()
+
+    def _save_tracker_info(self, key):
+        self.etag_value_for_current_download = key.etag.strip('"\'')
+        if not self.tracker_file_name:
+            return
+        f = None
+        try:
+            f = open(self.tracker_file_name, 'w')
+            f.write('%s\n' % self.etag_value_for_current_download)
+        except IOError as e:
+            raise ResumableDownloadException(
+                'Couldn\'t write tracker file (%s): %s.\nThis can happen\n'
+                'if you\'re using an incorrectly configured download tool\n'
+                '(e.g., gsutil configured to save tracker files to an '
+                'unwritable directory)' %
+                (self.tracker_file_name, e.strerror),
+                ResumableTransferDisposition.ABORT)
+        finally:
+            if f:
+                f.close()
+
+    def _remove_tracker_file(self):
+        if (self.tracker_file_name and
+                os.path.exists(self.tracker_file_name)):
+            os.unlink(self.tracker_file_name)
+
+    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
+                                    torrent, version_id, hash_algs):
+        """
+        Attempts a resumable download.
+
+        Raises ResumableDownloadException if any problems occur.
+        """
+        cur_file_size = get_cur_file_size(fp, position_to_eof=True)
+
+        if (cur_file_size and
+                self.etag_value_for_current_download and
+                self.etag_value_for_current_download == key.etag.strip('"\'')):
+            # Try to resume existing transfer.
+            if cur_file_size > key.size:
+                raise ResumableDownloadException(
+                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
+                    'if you re-try this download it will start from scratch' %
+                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
+                     key.size), ResumableTransferDisposition.ABORT)
+            elif cur_file_size == key.size:
+                if key.bucket.connection.debug >= 1:
+                    print('Download complete.')
+                return
+            if key.bucket.connection.debug >= 1:
+                print('Resuming download.')
+            headers = headers.copy()
+            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
+            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
+            self.download_start_point = cur_file_size
+        else:
+            if key.bucket.connection.debug >= 1:
+                print('Starting new resumable download.')
+            self._save_tracker_info(key)
+            self.download_start_point = 0
+            # Truncate the file, in case a new resumable download is being
+            # started atop an existing file.
+            fp.truncate(0)
+
+        # Disable AWSAuthConnection-level retry behavior, since that would
+        # cause downloads to restart from scratch.
+        if isinstance(key, GSKey):
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0, hash_algs=hash_algs)
+        else:
+            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                         override_num_retries=0)
+        fp.flush()
+
+    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
+                 version_id=None, hash_algs=None):
+        """
+        Retrieves a file from a Key
+
+        :type key: :class:`boto.s3.key.Key` or subclass
+        :param key: The Key object from which the file is to be downloaded
+
+        :type fp: file
+        :param fp: File pointer into which data should be downloaded
+
+        :type headers: dict
+        :param headers: Headers to send when retrieving the file
+
+        :type cb: function
+        :param cb: (optional) a callback function that will be called to report
+            progress on the download. The callback should accept two integer
+            parameters, the first representing the number of bytes that have
+            been successfully transmitted from the storage service and
+            the second representing the total number of bytes that need
+            to be transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter this parameter determines the granularity of the callback
+            by defining the maximum number of times the callback will be
+            called during the file transfer.
+
+        :type torrent: bool
+        :param torrent: Flag for whether to get a torrent for the file
+
+        :type version_id: string
+        :param version_id: The version ID (optional)
+
+        :type hash_algs: dictionary
+        :param hash_algs: (optional) Dictionary of hash algorithms and
+            corresponding hashing class that implements update() and digest().
+            Defaults to {'md5': hashlib/md5.md5}.
+
+        Raises ResumableDownloadException if a problem occurs during
+        the transfer.
+        """
+
+        debug = key.bucket.connection.debug
+        if not headers:
+            headers = {}
+
+        # Use num-retries from constructor if one was provided; else check
+        # for a value specified in the boto config file; else default to 6.
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 6)
+        progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            had_file_bytes_before_attempt = get_cur_file_size(fp)
+            try:
+                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
+                                                 torrent, version_id, hash_algs)
+                # Download succeeded, so remove the tracker file (if we have one).
+                self._remove_tracker_file()
+                # Previously, check_final_md5() was called here to validate
+                # downloaded file's checksum, however, to be consistent with
+                # non-resumable downloads, this call was removed. Checksum
+                # validation of file contents should be done by the caller.
+                if debug >= 1:
+                    print('Resumable download complete.')
+                return
+            except self.RETRYABLE_EXCEPTIONS as e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+                if isinstance(e, IOError) and e.errno == errno.EPIPE:
+                    # Broken pipe error causes httplib to immediately
+                    # close the socket (http://bugs.python.org/issue5542),
+                    # so we need to close and reopen the key before resuming
+                    # the download.
+                    if isinstance(key, GSKey):
+                        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                                     override_num_retries=0, hash_algs=hash_algs)
+                    else:
+                        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
+                                     override_num_retries=0)
+            except ResumableDownloadException as e:
+                if (e.disposition ==
+                        ResumableTransferDisposition.ABORT_CUR_PROCESS):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s)' % e.message)
+                    raise
+                elif (e.disposition ==
+                        ResumableTransferDisposition.ABORT):
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableDownloadException '
+                              '(%s); aborting and removing tracker file' %
+                              e.message)
+                    self._remove_tracker_file()
+                    raise
+                else:
+                    if debug >= 1:
+                        print('Caught ResumableDownloadException (%s) - will '
+                              'retry' % e.message)
+
+            # At this point we had a re-tryable failure; see if we made progress.
+            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
+                progress_less_iterations = 0
+            else:
+                progress_less_iterations += 1
+
+            if progress_less_iterations > self.num_retries:
+                # Don't retry any longer in the current process.
+                raise ResumableDownloadException(
+                    'Too many resumable download attempts failed without '
+                    'progress. You might try this download again later',
+                    ResumableTransferDisposition.ABORT_CUR_PROCESS)
+
+            # Close the key, in case a previous download died partway
+            # through and left data in the underlying key HTTP buffer.
+            # Do this within a try/except block in case the connection is
+            # closed (since key.close() attempts to do a final read, in which
+            # case this read attempt would get an IncompleteRead exception,
+            # which we can safely ignore).
+            try:
+                key.close()
+            except httplib.IncompleteRead:
+                pass
+
+            sleep_time_secs = 2**progress_less_iterations
+            if debug >= 1:
+                print('Got retryable failure (%d progress-less in a row).\n'
+                      'Sleeping %d seconds before re-trying' %
+                      (progress_less_iterations, sleep_time_secs))
+            time.sleep(sleep_time_secs)
diff --git a/ext/boto/s3/tagging.py b/ext/boto/s3/tagging.py
new file mode 100644
index 0000000000..0af6406fb1
--- /dev/null
+++ b/ext/boto/s3/tagging.py
@@ -0,0 +1,71 @@
+from boto import handler
+import xml.sax
+
+
+class Tag(object):
+    def __init__(self, key=None, value=None):
+        self.key = key
+        self.value = value
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Key':
+            self.key = value
+        elif name == 'Value':
+            self.value = value
+
+    def to_xml(self):
+        return '<Tag><Key>%s</Key><Value>%s</Value></Tag>' % (
+            self.key, self.value)
+
+    def __eq__(self, other):
+        return (self.key == other.key and self.value == other.value)
+
+
+class TagSet(list):
+    def startElement(self, name, attrs, connection):
+        if name == 'Tag':
+            tag = Tag()
+            self.append(tag)
+            return tag
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def add_tag(self, key, value):
+        tag = Tag(key, value)
+        self.append(tag)
+
+    def to_xml(self):
+        xml = '<TagSet>'
+        for tag in self:
+            xml += tag.to_xml()
+        xml += '</TagSet>'
+        return xml
+
+
+class Tags(list):
+    """A container for the tags associated with a bucket."""
+
+    def startElement(self, name, attrs, connection):
+        if name == 'TagSet':
+            tag_set = TagSet()
+            self.append(tag_set)
+            return tag_set
+        return None
+
+    def endElement(self, name, value, connection):
+        setattr(self, name, value)
+
+    def to_xml(self):
+        xml = '<Tagging>'
+        for tag_set in self:
+            xml += tag_set.to_xml()
+        xml += '</Tagging>'
+        return xml
+
+    def add_tag_set(self, tag_set):
+        self.append(tag_set)
diff --git a/ext/boto/s3/user.py b/ext/boto/s3/user.py
new file mode 100644
index 0000000000..f2cbbb7f8f
--- /dev/null
+++ b/ext/boto/s3/user.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
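
[Editor's note: a quick orientation on the tagging module added above. A bucket tagging document nests Tag inside TagSet inside Tags, and to_xml() serializes that nesting. Below is a minimal sketch using only the API defined in that file; the key/value pairs are hypothetical.

    from boto.s3.tagging import Tags, TagSet

    tag_set = TagSet()
    tag_set.add_tag('project', 'medusa')   # hypothetical tag key/value
    tag_set.add_tag('env', 'dev')

    tags = Tags()
    tags.add_tag_set(tag_set)

    # Yields <Tagging><TagSet><Tag><Key>project</Key>...</Tag>...</TagSet></Tagging>
    print(tags.to_xml())

]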
+
+class User(object):
+    def __init__(self, parent=None, id='', display_name=''):
+        if parent:
+            parent.owner = self
+        self.type = None
+        self.id = id
+        self.display_name = display_name
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'DisplayName':
+            self.display_name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        if self.type:
+            s = '<%s xsi:type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        s += '<DisplayName>%s</DisplayName>' % self.display_name
+        s += '</%s>' % element_name
+        return s
diff --git a/ext/boto/s3/website.py b/ext/boto/s3/website.py
new file mode 100644
index 0000000000..c307f3e990
--- /dev/null
+++ b/ext/boto/s3/website.py
@@ -0,0 +1,293 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+def tag(key, value):
+    start = '<%s>' % key
+    end = '</%s>' % key
+    return '%s%s%s' % (start, value, end)
+
+
+class WebsiteConfiguration(object):
+    """
+    Website configuration for a bucket.
+
+    :ivar suffix: Suffix that is appended to a request that is for a
+        "directory" on the website endpoint (e.g. if the suffix is
+        index.html and you make a request to samplebucket/images/
+        the data that is returned will be for the object with the
+        key name images/index.html). The suffix must not be empty
+        and must not include a slash character.
+
+    :ivar error_key: The object key name to use when a 4xx class error
+        occurs. This key identifies the page that is returned when
+        such an error occurs.
+
+    :ivar redirect_all_requests_to: Describes the redirect behavior for every
+        request to this bucket's website endpoint. If this value is not None,
+        no other values are considered when configuring the website
+        configuration for the bucket. This is an instance of
+        ``RedirectLocation``.
+
+    :ivar routing_rules: ``RoutingRules`` object which specifies conditions
+        and redirects that apply when the conditions are met.
+
+    """
+
+    def __init__(self, suffix=None, error_key=None,
+                 redirect_all_requests_to=None, routing_rules=None):
+        self.suffix = suffix
+        self.error_key = error_key
+        self.redirect_all_requests_to = redirect_all_requests_to
+        if routing_rules is not None:
+            self.routing_rules = routing_rules
+        else:
+            self.routing_rules = RoutingRules()
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRules':
+            self.routing_rules = RoutingRules()
+            return self.routing_rules
+        elif name == 'IndexDocument':
+            return _XMLKeyValue([('Suffix', 'suffix')], container=self)
+        elif name == 'ErrorDocument':
+            return _XMLKeyValue([('Key', 'error_key')], container=self)
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = ['<?xml version="1.0" encoding="UTF-8"?>',
+                 '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">']
+        if self.suffix is not None:
+            parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
+        if self.error_key is not None:
+            parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
+        if self.redirect_all_requests_to is not None:
+            parts.append(self.redirect_all_requests_to.to_xml())
+        if self.routing_rules:
+            parts.append(self.routing_rules.to_xml())
+        parts.append('</WebsiteConfiguration>')
+        return ''.join(parts)
+
+
+class _XMLKeyValue(object):
+    def __init__(self, translator, container=None):
+        self.translator = translator
+        if container:
+            self.container = container
+        else:
+            self.container = self
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        for xml_key, attr_name in self.translator:
+            if name == xml_key:
+                setattr(self.container, attr_name, value)
+
+    def to_xml(self):
+        parts = []
+        for xml_key, attr_name in self.translator:
+            content = getattr(self.container, attr_name)
+            if content is not None:
+                parts.append(tag(xml_key, content))
+        return ''.join(parts)
+
+
+class RedirectLocation(_XMLKeyValue):
+    """Specify redirect behavior for every request to a bucket's endpoint.
+
+    :ivar hostname: Name of the host where requests will be redirected.
+
+    :ivar protocol: Protocol to use (http, https) when redirecting requests.
+        The default is the protocol that is used in the original request.
+
+    """
+    TRANSLATOR = [('HostName', 'hostname'),
+                  ('Protocol', 'protocol'),
+                  ]
+
+    def __init__(self, hostname=None, protocol=None):
+        self.hostname = hostname
+        self.protocol = protocol
+        super(RedirectLocation, self).__init__(self.TRANSLATOR)
+
+    def to_xml(self):
+        return tag('RedirectAllRequestsTo',
+                   super(RedirectLocation, self).to_xml())
+
+
+class RoutingRules(list):
+
+    def add_rule(self, rule):
+        """
+
+        :type rule: :class:`boto.s3.website.RoutingRule`
+        :param rule: A routing rule.
+
+        :return: This ``RoutingRules`` object is returned,
+            so that it can chain subsequent calls.
+
+        """
+        self.append(rule)
+        return self
+
+    def startElement(self, name, attrs, connection):
+        if name == 'RoutingRule':
+            rule = RoutingRule(Condition(), Redirect())
+            self.add_rule(rule)
+            return rule
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def __repr__(self):
+        return "RoutingRules(%s)" % super(RoutingRules, self).__repr__()
+
+    def to_xml(self):
+        inner_text = []
+        for rule in self:
+            inner_text.append(rule.to_xml())
+        return tag('RoutingRules', '\n'.join(inner_text))
+
+
+class RoutingRule(object):
+    """Represents a single routing rule.
+
+    There are convenience methods for making rule creation
+    more concise::
+
+        rule = RoutingRule.when(key_prefix='foo/').then_redirect('example.com')
+
+    :ivar condition: Describes condition that must be met for the
+        specified redirect to apply.
+
+    :ivar redirect: Specifies redirect behavior. You can redirect requests to
+        another host, to another page, or with another protocol. In the event
+        of an error, you can specify a different error code to return.
+
+    """
+    def __init__(self, condition=None, redirect=None):
+        self.condition = condition
+        self.redirect = redirect
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Condition':
+            return self.condition
+        elif name == 'Redirect':
+            return self.redirect
+
+    def endElement(self, name, value, connection):
+        pass
+
+    def to_xml(self):
+        parts = []
+        if self.condition:
+            parts.append(self.condition.to_xml())
+        if self.redirect:
+            parts.append(self.redirect.to_xml())
+        return tag('RoutingRule', '\n'.join(parts))
+
+    @classmethod
+    def when(cls, key_prefix=None, http_error_code=None):
+        return cls(Condition(key_prefix=key_prefix,
+                             http_error_code=http_error_code), None)
+
+    def then_redirect(self, hostname=None, protocol=None, replace_key=None,
+                      replace_key_prefix=None, http_redirect_code=None):
+        self.redirect = Redirect(
+            hostname=hostname, protocol=protocol,
+            replace_key=replace_key,
+            replace_key_prefix=replace_key_prefix,
+            http_redirect_code=http_redirect_code)
+        return self
+
+
+class Condition(_XMLKeyValue):
+    """
+    :ivar key_prefix: The object key name prefix when the redirect is applied.
+        For example, to redirect requests for ExamplePage.html, the key prefix
+        will be ExamplePage.html. To redirect requests for all pages with the
+        prefix docs/, the key prefix will be /docs, which identifies all
+        objects in the docs/ folder.
+
+    :ivar http_error_code: The HTTP error code when the redirect is applied. In
+        the event of an error, if the error code equals this value, then the
+        specified redirect is applied.
+
+    """
+    TRANSLATOR = [
+        ('KeyPrefixEquals', 'key_prefix'),
+        ('HttpErrorCodeReturnedEquals', 'http_error_code'),
+    ]
+
+    def __init__(self, key_prefix=None, http_error_code=None):
+        self.key_prefix = key_prefix
+        self.http_error_code = http_error_code
+        super(Condition, self).__init__(self.TRANSLATOR)
+
+    def to_xml(self):
+        return tag('Condition', super(Condition, self).to_xml())
+
+
+class Redirect(_XMLKeyValue):
+    """
+    :ivar hostname: The host name to use in the redirect request.
+
+    :ivar protocol: The protocol to use in the redirect request. Can be either
+        'http' or 'https'.
+
+    :ivar replace_key: The specific object key to use in the redirect request.
+        For example, redirect requests to error.html.
+
+    :ivar replace_key_prefix: The object key prefix to use in the redirect
+        request. For example, to redirect requests for all pages with prefix
+        docs/ (objects in the docs/ folder) to documents/, you can set a
+        condition block with KeyPrefixEquals set to docs/ and in the Redirect
+        set ReplaceKeyPrefixWith to /documents.
+
+    :ivar http_redirect_code: The HTTP redirect code to use on the response.
+ + """ + + TRANSLATOR = [ + ('Protocol', 'protocol'), + ('HostName', 'hostname'), + ('ReplaceKeyWith', 'replace_key'), + ('ReplaceKeyPrefixWith', 'replace_key_prefix'), + ('HttpRedirectCode', 'http_redirect_code'), + ] + + def __init__(self, hostname=None, protocol=None, replace_key=None, + replace_key_prefix=None, http_redirect_code=None): + self.hostname = hostname + self.protocol = protocol + self.replace_key = replace_key + self.replace_key_prefix = replace_key_prefix + self.http_redirect_code = http_redirect_code + super(Redirect, self).__init__(self.TRANSLATOR) + + def to_xml(self): + return tag('Redirect', super(Redirect, self).to_xml()) + + diff --git a/ext/boto/sdb/__init__.py b/ext/boto/sdb/__init__.py new file mode 100644 index 0000000000..415a869618 --- /dev/null +++ b/ext/boto/sdb/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.sdb.regioninfo import SDBRegionInfo +from boto.regioninfo import get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the SDB service. + + :rtype: list + :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances + """ + return get_regions( + 'sdb', + region_cls=SDBRegionInfo + ) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sdb.connection.SDBConnection`. + + :type: str + :param region_name: The name of the region to connect to. 
+
+    :rtype: :class:`boto.sdb.connection.SDBConnection` or ``None``
+    :return: A connection to the given region, or None if an invalid region
+        name is given
+    """
+    return connect('sdb', region_name, region_cls=SDBRegionInfo, **kw_params)
diff --git a/ext/boto/sdb/connection.py b/ext/boto/sdb/connection.py
new file mode 100644
index 0000000000..fa7cb83e3e
--- /dev/null
+++ b/ext/boto/sdb/connection.py
@@ -0,0 +1,618 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import xml.sax
+import threading
+import boto
+from boto import handler
+from boto.connection import AWSQueryConnection
+from boto.sdb.domain import Domain, DomainMetaData
+from boto.sdb.item import Item
+from boto.sdb.regioninfo import SDBRegionInfo
+from boto.exception import SDBResponseError
+
+class ItemThread(threading.Thread):
+    """
+    A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
+    Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
+    ``items`` instance variable after :py:meth:`run() <run>` is called.
+
+    .. tip:: The item retrieval will not start until
+        the :func:`run() <run>` method is called.
+    """
+    def __init__(self, name, domain_name, item_names):
+        """
+        :param str name: A thread name. Used for identification.
+        :param str domain_name: The name of a SimpleDB
+            :class:`Domain <boto.sdb.domain.Domain>`
+        :type item_names: string or list of strings
+        :param item_names: The name(s) of the items to retrieve from the specified
+            :class:`Domain <boto.sdb.domain.Domain>`.
+        :ivar list items: A list of items retrieved. Starts as empty list.
+        """
+        super(ItemThread, self).__init__(name=name)
+        #print 'starting %s with %d items' % (name, len(item_names))
+        self.domain_name = domain_name
+        self.conn = SDBConnection()
+        self.item_names = item_names
+        self.items = []
+
+    def run(self):
+        """
+        Start the threaded retrieval of items. Populates the
+        ``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
+        """
+        for item_name in self.item_names:
+            item = self.conn.get_attributes(self.domain_name, item_name)
+            self.items.append(item)
+
+#boto.set_stream_logger('sdb')
+
+class SDBConnection(AWSQueryConnection):
+    """
+    This class serves as a gateway to your SimpleDB region (defaults to
+    us-east-1). Methods within allow access to SimpleDB
+    :class:`Domain <boto.sdb.domain.Domain>` objects and their associated
+    :class:`Item <boto.sdb.item.Item>` objects.
+
+    .. tip::
+        While you may instantiate this class directly, it may be easier to
+        go through :py:func:`boto.connect_sdb`.
+ """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sdb.us-east-1.amazonaws.com' + APIVersion = '2009-04-15' + ResponseError = SDBResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, security_token=None, validate_certs=True, + profile_name=None): + """ + For any keywords that aren't documented, refer to the parent class, + :py:class:`boto.connection.AWSAuthConnection`. You can avoid having + to worry about these keyword arguments by instantiating these objects + via :py:func:`boto.connect_sdb`. + + :type region: :class:`boto.sdb.regioninfo.SDBRegionInfo` + :keyword region: Explicitly specify a region. Defaults to ``us-east-1`` + if not specified. You may also specify the region in your ``boto.cfg``: + + .. code-block:: cfg + + [SDB] + region = eu-west-1 + + """ + if not region: + region_name = boto.config.get('SDB', 'region', self.DefaultRegionName) + for reg in boto.sdb.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(SDBConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, + proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + self.box_usage = 0.0 + self.converter = converter + self.item_cls = Item + + def _required_auth_capability(self): + return ['sdb'] + + def set_item_cls(self, cls): + """ + While the default item class is :py:class:`boto.sdb.item.Item`, this + default may be overridden. Use this method to change a connection's + item class. + + :param object cls: The new class to set as this connection's item + class. See the default item class for inspiration as to what your + replacement should/could look like. 
+        """
+        self.item_cls = cls
+
+    def _build_name_value_list(self, params, attributes, replace=False,
+                               label='Attribute'):
+        keys = sorted(attributes.keys())
+        i = 1
+        for key in keys:
+            value = attributes[key]
+            if isinstance(value, list):
+                for v in value:
+                    params['%s.%d.Name' % (label, i)] = key
+                    if self.converter:
+                        v = self.converter.encode(v)
+                    params['%s.%d.Value' % (label, i)] = v
+                    if replace:
+                        params['%s.%d.Replace' % (label, i)] = 'true'
+                    i += 1
+            else:
+                params['%s.%d.Name' % (label, i)] = key
+                if self.converter:
+                    value = self.converter.encode(value)
+                params['%s.%d.Value' % (label, i)] = value
+                if replace:
+                    params['%s.%d.Replace' % (label, i)] = 'true'
+                i += 1
+
+    def _build_expected_value(self, params, expected_value):
+        params['Expected.1.Name'] = expected_value[0]
+        if expected_value[1] is True:
+            params['Expected.1.Exists'] = 'true'
+        elif expected_value[1] is False:
+            params['Expected.1.Exists'] = 'false'
+        else:
+            params['Expected.1.Value'] = expected_value[1]
+
+    def _build_batch_list(self, params, items, replace=False):
+        item_names = items.keys()
+        i = 0
+        for item_name in item_names:
+            params['Item.%d.ItemName' % i] = item_name
+            j = 0
+            item = items[item_name]
+            if item is not None:
+                attr_names = item.keys()
+                for attr_name in attr_names:
+                    value = item[attr_name]
+                    if isinstance(value, list):
+                        for v in value:
+                            if self.converter:
+                                v = self.converter.encode(v)
+                            params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
+                            params['Item.%d.Attribute.%d.Value' % (i, j)] = v
+                            if replace:
+                                params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
+                            j += 1
+                    else:
+                        params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
+                        if self.converter:
+                            value = self.converter.encode(value)
+                        params['Item.%d.Attribute.%d.Value' % (i, j)] = value
+                        if replace:
+                            params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
+                        j += 1
+            i += 1
+
+    def _build_name_list(self, params, attribute_names):
+        i = 1
+        attribute_names.sort()
+        for name in attribute_names:
+            params['Attribute.%d.Name' % i] = name
+            i += 1
+
+    def get_usage(self):
+        """
+        Returns the BoxUsage (in compute seconds) accumulated on this specific
+        SDBConnection instance.
+
+        .. tip:: This can be out of date, and should only be treated as a
+            rough estimate. Also note that this estimate only applies to the
+            requests made on this specific connection instance. It is by
+            no means an account-wide estimate.
+
+        :rtype: float
+        :return: The accumulated BoxUsage of all requests made on the connection.
+        """
+        return self.box_usage
+
+    def print_usage(self):
+        """
+        Print the BoxUsage and approximate costs of all requests made on
+        this specific SDBConnection instance.
+
+        .. tip:: This can be out of date, and should only be treated as a
+            rough estimate. Also note that this estimate only applies to the
+            requests made on this specific connection instance. It is by
+            no means an account-wide estimate.
+        """
+        print('Total Usage: %f compute seconds' % self.box_usage)
+        cost = self.box_usage * 0.14
+        print('Approximate Cost: $%f' % cost)
+
+    def get_domain(self, domain_name, validate=True):
+        """
+        Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
+        matches ``domain_name``.
+
+        :param str domain_name: The name of the domain to retrieve
+        :keyword bool validate: When ``True``, check to see if the domain
+            actually exists. If ``False``, blindly return a
+            :py:class:`Domain <boto.sdb.domain.Domain>` object with the
+            specified name set.
+
+        :raises:
+            :py:class:`boto.exception.SDBResponseError` if ``validate`` is
+            ``True`` and no match could be found.
+
+        :rtype: :py:class:`boto.sdb.domain.Domain`
+        :return: The requested domain
+        """
+        domain = Domain(self, domain_name)
+        if validate:
+            self.select(domain, """select * from `%s` limit 1""" % domain_name)
+        return domain
+
+    def lookup(self, domain_name, validate=True):
+        """
+        Lookup an existing SimpleDB domain. This differs from
+        :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
+        ``True`` and no match was found (instead of raising an exception).
+
+        :param str domain_name: The name of the domain to retrieve
+
+        :param bool validate: If ``True``, a ``None`` value will be returned
+            if the specified domain can't be found. If ``False``, a
+            :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
+            returned, regardless of whether it actually exists.
+
+        :rtype: :class:`boto.sdb.domain.Domain` object or ``None``
+        :return: The Domain object or ``None`` if the domain does not exist.
+        """
+        try:
+            domain = self.get_domain(domain_name, validate)
+        except:
+            domain = None
+        return domain
+
+    def get_all_domains(self, max_domains=None, next_token=None):
+        """
+        Returns a :py:class:`boto.resultset.ResultSet` containing
+        all :py:class:`boto.sdb.domain.Domain` objects associated with
+        this connection's Access Key ID.
+
+        :keyword int max_domains: Limit the returned
+            :py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
+            number of members.
+        :keyword str next_token: A token string that was returned in an
+            earlier call to this method as the ``next_token`` attribute
+            on the returned :py:class:`ResultSet <boto.resultset.ResultSet>`
+            object. This attribute is set if there are more domains than
+            the value specified in the ``max_domains`` keyword. Pass the
+            ``next_token`` value from your earlier query in this keyword to
+            get the next 'page' of domains.
+        """
+        params = {}
+        if max_domains:
+            params['MaxNumberOfDomains'] = max_domains
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('ListDomains', params, [('DomainName', Domain)])
+
+    def create_domain(self, domain_name):
+        """
+        Create a SimpleDB domain.
+
+        :type domain_name: string
+        :param domain_name: The name of the new domain
+
+        :rtype: :class:`boto.sdb.domain.Domain` object
+        :return: The newly created domain
+        """
+        params = {'DomainName': domain_name}
+        d = self.get_object('CreateDomain', params, Domain)
+        d.name = domain_name
+        return d
+
+    def get_domain_and_name(self, domain_or_name):
+        """
+        Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
+        ``tuple`` with the following members (in order):
+
+            * An instance of :class:`boto.sdb.domain.Domain` for the requested
+              domain
+            * The domain's name as a ``str``
+
+        :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
+        :param domain_or_name: The domain or domain name to get the domain
+            and name for.
+
+        :raises: :class:`boto.exception.SDBResponseError` when an invalid
+            domain name is specified.
+
+        :rtype: tuple
+        :return: A ``tuple`` with contents outlined as per above.
+        """
+        if (isinstance(domain_or_name, Domain)):
+            return (domain_or_name, domain_or_name.name)
+        else:
+            return (self.get_domain(domain_or_name), domain_or_name)
+
+    def delete_domain(self, domain_or_name):
+        """
+        Delete a SimpleDB domain.
+
+        .. caution:: This will delete the domain and all items within the domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+ :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: bool + :return: True if successful + + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + return self.get_status('DeleteDomain', params) + + def domain_metadata(self, domain_or_name): + """ + Get the Metadata for a SimpleDB domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: :class:`boto.sdb.domain.DomainMetaData` object + :return: The newly created domain metadata object + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + d = self.get_object('DomainMetadata', params, DomainMetaData) + d.domain = domain + return d + + def put_attributes(self, domain_or_name, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + stored. + + :type attribute_names: dict or dict-like object + :param attribute_names: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + self._build_name_value_list(params, attributes, replace) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('PutAttributes', params) + + def batch_put_attributes(self, domain_or_name, items, replace=True): + """ + Store attributes for multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attribute_names parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. 
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + self._build_batch_list(params, items, replace) + return self.get_status('BatchPutAttributes', params, verb='POST') + + def get_attributes(self, domain_or_name, item_name, attribute_names=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are + being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. + This parameter is optional. If not supplied, all attributes will + be retrieved for the item. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :type item: :class:`boto.sdb.item.Item` + :keyword item: Instead of instantiating a new Item object, you may + specify one to update. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item with the requested attribute name/values set on it + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + if consistent_read: + params['ConsistentRead'] = 'true' + if attribute_names: + if not isinstance(attribute_names, list): + attribute_names = [attribute_names] + self.build_list_params(params, attribute_names, 'AttributeName') + response = self.make_request('GetAttributes', params) + body = response.read() + if response.status == 200: + if item is None: + item = self.item_cls(domain, item_name) + h = handler.XmlHandler(item, self) + xml.sax.parseString(body, h) + return item + else: + raise SDBResponseError(response.status, response.reason, body) + + def delete_attributes(self, domain_or_name, item_name, attr_names=None, + expected_value=None): + """ + Delete attributes from a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being + deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which + will cause all values associated with that attribute + name to be deleted or a dict or Item containing the + attribute names and keys and list of values to + delete as the value. If no value is supplied, + all attribute name/values for the item will be + deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. 
+ + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name, + 'ItemName': item_name} + if attr_names: + if isinstance(attr_names, list): + self._build_name_list(params, attr_names) + elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls): + self._build_name_value_list(params, attr_names) + if expected_value: + self._build_expected_value(params, expected_value) + return self.get_status('DeleteAttributes', params) + + def batch_delete_attributes(self, domain_or_name, items): + """ + Delete multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the attribute_names parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName': domain_name} + self._build_batch_list(params, items, False) + return self.get_status('BatchDeleteAttributes', params, verb='POST') + + def select(self, domain_or_name, query='', next_token=None, + consistent_read=False): + """ + Returns a set of Attributes for item names within domain_name that + match the query. The query must be expressed in using the SELECT + style syntax rather than the original SimpleDB query language. + Even though the select request does not require a domain object, + a domain object must be passed into this method so the Item objects + returned can point to the appropriate domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object + :param domain_or_name: Either the name of a domain or a Domain object + + :type query: string + :param query: The SimpleDB query to be performed. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. + + :rtype: ResultSet + :return: An iterator containing the results. 
+ """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'SelectExpression': query} + if consistent_read: + params['ConsistentRead'] = 'true' + if next_token: + params['NextToken'] = next_token + try: + return self.get_list('Select', params, [('Item', self.item_cls)], + parent=domain) + except SDBResponseError as e: + e.body = "Query: %s\n%s" % (query, e.body) + raise e diff --git a/ext/boto/sdb/db/__init__.py b/ext/boto/sdb/db/__init__.py new file mode 100644 index 0000000000..71f6b7b738 --- /dev/null +++ b/ext/boto/sdb/db/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. diff --git a/ext/boto/sdb/db/blob.py b/ext/boto/sdb/db/blob.py new file mode 100644 index 0000000000..6c286ec379 --- /dev/null +++ b/ext/boto/sdb/db/blob.py @@ -0,0 +1,76 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
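
[Editor's note: stepping back to the SDBConnection API defined above, here is a hedged usage sketch built only from methods shown in this patch. The domain, item, and attribute names are hypothetical, and valid AWS credentials are assumed to be configured for boto.

    import boto

    sdb = boto.connect_sdb()                      # returns an SDBConnection
    domain = sdb.create_domain('my_test_domain')  # hypothetical domain name

    sdb.put_attributes(domain, 'item1', {'color': 'blue', 'size': 'L'})
    item = sdb.get_attributes(domain, 'item1', consistent_read=True)

    # select() takes SELECT-style syntax and yields item_cls instances
    query = "select * from `my_test_domain` where color = 'blue'"
    for result in sdb.select(domain, query):
        print(result)

]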
+ +from boto.compat import six + + +class Blob(object): + """Blob object""" + def __init__(self, value=None, file=None, id=None): + self._file = file + self.id = id + self.value = value + + @property + def file(self): + from StringIO import StringIO + if self._file: + f = self._file + else: + f = StringIO(self.value) + return f + + def __str__(self): + return six.text_type(self).encode('utf-8') + + def __unicode__(self): + if hasattr(self.file, "get_contents_as_string"): + value = self.file.get_contents_as_string() + else: + value = self.file.getvalue() + if isinstance(value, six.text_type): + return value + else: + return value.decode('utf-8') + + def read(self): + if hasattr(self.file, "get_contents_as_string"): + return self.file.get_contents_as_string() + else: + return self.file.read() + + def readline(self): + return self.file.readline() + + def next(self): + return next(self.file) + + def __iter__(self): + return iter(self.file) + + @property + def size(self): + if self._file: + return self._file.size + elif self.value: + return len(self.value) + else: + return 0 diff --git a/ext/boto/sdb/db/key.py b/ext/boto/sdb/db/key.py new file mode 100644 index 0000000000..42f6bc9b3a --- /dev/null +++ b/ext/boto/sdb/db/key.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
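
[Editor's note: a small sketch of the Blob wrapper defined above. When no backing S3 key is attached, the raw value is served through an in-memory StringIO file object; this vendored copy targets Python 2, which is assumed here.

    from boto.sdb.db.blob import Blob

    b = Blob(value='hello world')
    print(b.size)    # 11, the length of the raw value
    print(b.read())  # 'hello world', via the StringIO fallback

]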
+ +class Key(object): + + @classmethod + def from_path(cls, *args, **kwds): + raise NotImplementedError("Paths are not currently supported") + + def __init__(self, encoded=None, obj=None): + self.name = None + if obj: + self.id = obj.id + self.kind = obj.kind() + else: + self.id = None + self.kind = None + + def app(self): + raise NotImplementedError("Applications are not currently supported") + + def kind(self): + return self.kind + + def id(self): + return self.id + + def name(self): + raise NotImplementedError("Key Names are not currently supported") + + def id_or_name(self): + return self.id + + def has_id_or_name(self): + return self.id is not None + + def parent(self): + raise NotImplementedError("Key parents are not currently supported") + + def __str__(self): + return self.id_or_name() diff --git a/ext/boto/sdb/db/manager/__init__.py b/ext/boto/sdb/db/manager/__init__.py new file mode 100644 index 0000000000..ded1716cbb --- /dev/null +++ b/ext/boto/sdb/db/manager/__init__.py @@ -0,0 +1,85 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto + + +def get_manager(cls): + """ + Returns the appropriate Manager class for a given Model class. It + does this by looking in the boto config for a section like this:: + + [DB] + db_type = SimpleDB + db_user = + db_passwd = + db_name = my_domain + [DB_TestBasic] + db_type = SimpleDB + db_user = + db_passwd = + db_name = basic_domain + db_port = 1111 + + The values in the DB section are "generic values" that will be used + if nothing more specific is found. You can also create a section for + a specific Model class that gives the db info for that class. + In the example above, TestBasic is a Model subclass. 
+    """
+    db_user = boto.config.get('DB', 'db_user', None)
+    db_passwd = boto.config.get('DB', 'db_passwd', None)
+    db_type = boto.config.get('DB', 'db_type', 'SimpleDB')
+    db_name = boto.config.get('DB', 'db_name', None)
+    db_table = boto.config.get('DB', 'db_table', None)
+    db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com")
+    db_port = boto.config.getint('DB', 'db_port', 443)
+    enable_ssl = boto.config.getbool('DB', 'enable_ssl', True)
+    sql_dir = boto.config.get('DB', 'sql_dir', None)
+    debug = boto.config.getint('DB', 'debug', 0)
+    # first see if there is a fully qualified section name in the Boto config
+    module_name = cls.__module__.replace('.', '_')
+    db_section = 'DB_' + module_name + '_' + cls.__name__
+    if not boto.config.has_section(db_section):
+        db_section = 'DB_' + cls.__name__
+    if boto.config.has_section(db_section):
+        db_user = boto.config.get(db_section, 'db_user', db_user)
+        db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd)
+        db_type = boto.config.get(db_section, 'db_type', db_type)
+        db_name = boto.config.get(db_section, 'db_name', db_name)
+        db_table = boto.config.get(db_section, 'db_table', db_table)
+        db_host = boto.config.get(db_section, 'db_host', db_host)
+        db_port = boto.config.getint(db_section, 'db_port', db_port)
+        enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl)
+        debug = boto.config.getint(db_section, 'debug', debug)
+    elif hasattr(cls, "_db_name") and cls._db_name is not None:
+        # More specific than the generic DB config is any _db_name class property
+        db_name = cls._db_name
+    elif hasattr(cls.__bases__[0], "_manager"):
+        return cls.__bases__[0]._manager
+    if db_type == 'SimpleDB':
+        from boto.sdb.db.manager.sdbmanager import SDBManager
+        return SDBManager(cls, db_name, db_user, db_passwd,
+                          db_host, db_port, db_table, sql_dir, enable_ssl)
+    elif db_type == 'XML':
+        from boto.sdb.db.manager.xmlmanager import XMLManager
+        return XMLManager(cls, db_name, db_user, db_passwd,
+                          db_host, db_port, db_table, sql_dir, enable_ssl)
+    else:
+        raise ValueError('Unknown db_type: %s' % db_type)
diff --git a/ext/boto/sdb/db/manager/sdbmanager.py b/ext/boto/sdb/db/manager/sdbmanager.py
new file mode 100644
index 0000000000..d964d07a2d
--- /dev/null
+++ b/ext/boto/sdb/db/manager/sdbmanager.py
@@ -0,0 +1,738 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
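
[Editor's note: a minimal sketch of the config-section resolution order that get_manager() above implements, from most to least specific. The class name is hypothetical; boto.config is a ConfigParser-style object, as used throughout this file.

    import boto

    class TestBasic(object):  # stand-in for a Model subclass
        pass

    module_name = TestBasic.__module__.replace('.', '_')
    candidates = ['DB_' + module_name + '_' + TestBasic.__name__,  # DB_<module>_<class>
                  'DB_' + TestBasic.__name__,                      # DB_<class>
                  'DB']                                            # generic fallback
    section = next((s for s in candidates if boto.config.has_section(s)), 'DB')

]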
+import boto
+import re
+from boto.utils import find_class
+import uuid
+from boto.sdb.db.key import Key
+from boto.sdb.db.blob import Blob
+from boto.sdb.db.property import ListProperty, MapProperty
+from datetime import datetime, date, time
+from boto.exception import SDBPersistenceError, S3ResponseError
+from boto.compat import map, six, long_type
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+
+class TimeDecodeError(Exception):
+    pass
+
+
+class SDBConverter(object):
+    """
+    Responsible for converting base Python types to format compatible
+    with underlying database. For SimpleDB, that means everything
+    needs to be converted to a string when stored in SimpleDB and from
+    a string when retrieved.
+
+    To convert a value, pass it to the encode or decode method. The
+    encode method will take a Python native value and convert to DB
+    format. The decode method will take a DB format value and convert
+    it to Python native format. To find the appropriate method to
+    call, the generic encode/decode methods will look for the
+    type-specific method by searching for a method
+    called "encode_<type name>" or "decode_<type name>".
+    """
+    def __init__(self, manager):
+        # Do a delayed import to prevent possible circular import errors.
+        from boto.sdb.db.model import Model
+        self.model_class = Model
+        self.manager = manager
+        self.type_map = {bool: (self.encode_bool, self.decode_bool),
+                         int: (self.encode_int, self.decode_int),
+                         float: (self.encode_float, self.decode_float),
+                         self.model_class: (
+                             self.encode_reference, self.decode_reference
+                         ),
+                         Key: (self.encode_reference, self.decode_reference),
+                         datetime: (self.encode_datetime, self.decode_datetime),
+                         date: (self.encode_date, self.decode_date),
+                         time: (self.encode_time, self.decode_time),
+                         Blob: (self.encode_blob, self.decode_blob),
+                         str: (self.encode_string, self.decode_string),
+                         }
+        if six.PY2:
+            self.type_map[long] = (self.encode_long, self.decode_long)
+
+    def encode(self, item_type, value):
+        try:
+            if self.model_class in item_type.mro():
+                item_type = self.model_class
+        except:
+            pass
+        if item_type in self.type_map:
+            encode = self.type_map[item_type][0]
+            return encode(value)
+        return value
+
+    def decode(self, item_type, value):
+        if item_type in self.type_map:
+            decode = self.type_map[item_type][1]
+            return decode(value)
+        return value
+
+    def encode_list(self, prop, value):
+        if value in (None, []):
+            return []
+        if not isinstance(value, list):
+            # This is a little trick to avoid encoding when it's just a single value,
+            # since that most likely means it's from a query
+            item_type = getattr(prop, "item_type")
+            return self.encode(item_type, value)
+        # Just enumerate(value) won't work here because
+        # we need to add in some zero padding
+        # We support lists up to 1,000 attributes, since
+        # SDB technically only supports 1024 attributes anyway
+        values = {}
+        for k, v in enumerate(value):
+            values["%03d" % k] = v
+        return self.encode_map(prop, values)
+
+    def encode_map(self, prop, value):
+        import urllib
+        if value is None:
+            return None
+        if not isinstance(value, dict):
+            raise ValueError('Expected a dict value, got %s' % type(value))
+        new_value = []
+        for key in value:
+            item_type = getattr(prop, "item_type")
+            if self.model_class in item_type.mro():
+                item_type = self.model_class
+            encoded_value = self.encode(item_type, value[key])
+            if encoded_value is not None:
+                new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
+        return new_value
+
+    def encode_prop(self, prop, value):
+        if isinstance(prop, ListProperty):
+            return self.encode_list(prop,
value) + elif isinstance(prop, MapProperty): + return self.encode_map(prop, value) + else: + return self.encode(prop.data_type, value) + + def decode_list(self, prop, value): + if not isinstance(value, list): + value = [value] + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + dec_val = {} + for val in value: + if val is not None: + k, v = self.decode_map_element(item_type, val) + try: + k = int(k) + except: + k = v + dec_val[k] = v + value = dec_val.values() + return value + + def decode_map(self, prop, value): + if not isinstance(value, list): + value = [value] + ret_value = {} + item_type = getattr(prop, "item_type") + for val in value: + k, v = self.decode_map_element(item_type, val) + ret_value[k] = v + return ret_value + + def decode_map_element(self, item_type, value): + """Decode a single element for a map""" + import urllib + key = value + if ":" in value: + key, value = value.split(':', 1) + key = urllib.unquote(key) + if self.model_class in item_type.mro(): + value = item_type(id=value) + else: + value = self.decode(item_type, value) + return (key, value) + + def decode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.decode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.decode_map(prop, value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + value += 2147483648 + return '%010d' % value + + def decode_int(self, value): + try: + value = int(value) + except: + boto.log.error("Error, %s is not an integer" % value) + value = 0 + value = int(value) + value -= 2147483648 + return int(value) + + def encode_long(self, value): + value = long_type(value) + value += 9223372036854775808 + return '%020d' % value + + def decode_long(self, value): + value = long_type(value) + value -= 9223372036854775808 + return value + + def encode_bool(self, value): + if value == True or str(value).lower() in ("true", "yes"): + return 'true' + else: + return 'false' + + def decode_bool(self, value): + if value.lower() == 'true': + return True + else: + return False + + def encode_float(self, value): + """ + See http://tools.ietf.org/html/draft-wood-ldapext-float-00. 
+        """
+        s = '%e' % value
+        l = s.split('e')
+        mantissa = l[0].ljust(18, '0')
+        exponent = l[1]
+        if value == 0.0:
+            case = '3'
+            exponent = '000'
+        elif mantissa[0] != '-' and exponent[0] == '+':
+            case = '5'
+            exponent = exponent[1:].rjust(3, '0')
+        elif mantissa[0] != '-' and exponent[0] == '-':
+            case = '4'
+            exponent = 999 + int(exponent)
+            exponent = '%03d' % exponent
+        elif mantissa[0] == '-' and exponent[0] == '-':
+            case = '2'
+            mantissa = '%f' % (10 + float(mantissa))
+            mantissa = mantissa.ljust(18, '0')
+            exponent = exponent[1:].rjust(3, '0')
+        else:
+            case = '1'
+            mantissa = '%f' % (10 + float(mantissa))
+            mantissa = mantissa.ljust(18, '0')
+            exponent = 999 - int(exponent)
+            exponent = '%03d' % exponent
+        return '%s %s %s' % (case, exponent, mantissa)
+
+    def decode_float(self, value):
+        case = value[0]
+        exponent = value[2:5]
+        mantissa = value[6:]
+        if case == '3':
+            return 0.0
+        elif case == '5':
+            pass
+        elif case == '4':
+            exponent = '%03d' % (int(exponent) - 999)
+        elif case == '2':
+            mantissa = '%f' % (float(mantissa) - 10)
+            exponent = '-' + exponent
+        else:
+            mantissa = '%f' % (float(mantissa) - 10)
+            exponent = '%03d' % abs((int(exponent) - 999))
+        return float(mantissa + 'e' + exponent)
+
+    def encode_datetime(self, value):
+        if isinstance(value, six.string_types):
+            return value
+        if isinstance(value, datetime):
+            return value.strftime(ISO8601)
+        else:
+            return value.isoformat()
+
+    def decode_datetime(self, value):
+        """Handles both Dates and DateTime objects"""
+        if value is None:
+            return value
+        try:
+            if "T" in value:
+                if "." in value:
+                    # Handle true "isoformat()" dates, which may have a
+                    # microsecond at the end of them
+                    return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S")
+                else:
+                    return datetime.strptime(value, ISO8601)
+            else:
+                value = value.split("-")
+                return date(int(value[0]), int(value[1]), int(value[2]))
+        except Exception:
+            return None
+
+    def encode_date(self, value):
+        if isinstance(value, six.string_types):
+            return value
+        return value.isoformat()
+
+    def decode_date(self, value):
+        try:
+            value = value.split("-")
+            return date(int(value[0]), int(value[1]), int(value[2]))
+        except:
+            return None
+
+    encode_time = encode_date
+
+    def decode_time(self, value):
+        """ converts strings in the form of HH:MM:SS.mmmmmm
+            (created by datetime.time.isoformat()) to
+            datetime.time objects.
+
+            Timezone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't
+            be handled right now and will raise TimeDecodeError.
+ """ + if '-' in value or '+' in value: + # TODO: Handle tzinfo + raise TimeDecodeError("Can't handle timezone aware objects: %r" % value) + tmp = value.split('.') + arg = map(int, tmp[0].split(':')) + if len(tmp) == 2: + arg.append(int(tmp[1])) + return time(*arg) + + def encode_reference(self, value): + if value in (None, 'None', '', ' '): + return None + if isinstance(value, six.string_types): + return value + else: + return value.id + + def decode_reference(self, value): + if not value or value == "None": + return None + return value + + def encode_blob(self, value): + if not value: + return None + if isinstance(value, six.string_types): + return value + + if not value.id: + bucket = self.manager.get_blob_bucket() + key = bucket.new_key(str(uuid.uuid4())) + value.id = "s3://%s/%s" % (key.bucket.name, key.name) + else: + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + key = bucket.get_key(match.group(2)) + else: + raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) + + if value.value is not None: + key.set_contents_from_string(value.value) + return value.id + + def decode_blob(self, value): + if not value: + return None + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + try: + key = bucket.get_key(match.group(2)) + except S3ResponseError as e: + if e.reason != "Forbidden": + raise + return None + else: + return None + if key: + return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name)) + else: + return None + + def encode_string(self, value): + """Convert ASCII, Latin-1 or UTF-8 to pure Unicode""" + if not isinstance(value, str): + return value + try: + return six.text_type(value, 'utf-8') + except: + # really, this should throw an exception. + # in the interest of not breaking current + # systems, however: + arr = [] + for ch in value: + arr.append(six.unichr(ord(ch))) + return u"".join(arr) + + def decode_string(self, value): + """Decoding a string is really nothing, just + return the value as-is""" + return value + + +class SDBManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl, + consistent=None): + self.cls = cls + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.enable_ssl = enable_ssl + self.s3 = None + self.bucket = None + self.converter = SDBConverter(self) + self._sdb = None + self._domain = None + if consistent is None and hasattr(cls, "__consistent__"): + consistent = cls.__consistent__ + self.consistent = consistent + + @property + def sdb(self): + if self._sdb is None: + self._connect() + return self._sdb + + @property + def domain(self): + if self._domain is None: + self._connect() + return self._domain + + def _connect(self): + args = dict(aws_access_key_id=self.db_user, + aws_secret_access_key=self.db_passwd, + is_secure=self.enable_ssl) + try: + region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0] + args['region'] = region + except IndexError: + pass + self._sdb = boto.connect_sdb(**args) + # This assumes that the domain has already been created + # It's much more efficient to do it this way rather than + # having this make a roundtrip each time to validate. 
+ # The downside is that if the domain doesn't exist, it breaks + self._domain = self._sdb.lookup(self.db_name, validate=False) + if not self._domain: + self._domain = self._sdb.create_domain(self.db_name) + + def _object_lister(self, cls, query_lister): + for item in query_lister: + obj = self.get_object(cls, item.name, item) + if obj: + yield obj + + def encode_value(self, prop, value): + if value is None: + return None + if not prop: + return str(value) + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.db_user, self.db_passwd) + return self.s3 + + def get_blob_bucket(self, bucket_name=None): + s3 = self.get_s3_connection() + bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name) + bucket_name = bucket_name.lower() + try: + self.bucket = s3.get_bucket(bucket_name) + except: + self.bucket = s3.create_bucket(bucket_name) + return self.bucket + + def load_object(self, obj): + if not obj._loaded: + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + if '__type__' in a: + for prop in obj.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + try: + setattr(obj, prop.name, value) + except Exception as e: + boto.log.exception(e) + obj._loaded = True + + def get_object(self, cls, id, a=None): + obj = None + if not a: + a = self.domain.get_attributes(id, consistent_read=self.consistent) + if '__type__' in a: + if not cls or a['__type__'] != cls.__name__: + cls = find_class(a['__module__'], a['__type__']) + if cls: + params = {} + for prop in cls.properties(hidden=False): + if prop.name in a: + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + params[prop.name] = value + obj = cls(id, **params) + obj._loaded = True + else: + s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__']) + boto.log.info('sdbmanager: %s' % s) + return obj + + def get_object_from_id(self, id): + return self.get_object(None, id) + + def query(self, query): + query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select)) + if query.limit: + query_str += " limit %s" % query.limit + rs = self.domain.select(query_str, max_items=query.limit, next_token=query.next_token) + query.rs = rs + return self._object_lister(query.model_class, rs) + + def count(self, cls, filters, quick=True, sort_by=None, select=None): + """ + Get the number of results that would + be returned in this query + """ + query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select)) + count = 0 + for row in self.domain.select(query): + count += int(row['Count']) + if quick: + return count + return count + + def _build_filter(self, property, name, op, val): + if name == "__id__": + name = 'itemName()' + if name != "itemName()": + name = '`%s`' % name + if val is None: + if op in ('is', '='): + return "%(name)s is null" % {"name": name} + elif op in ('is not', '!='): + return "%s is not null" % name + else: + val = "" + if property.__class__ == ListProperty: + if op in ("is", "="): + op = "like" + elif op in ("!=", "not"): + op = "not like" + if not(op in ["like", "not like"] and val.startswith("%")): + val = "%%:%s" % val + return "%s %s '%s'" % (name, op, val.replace("'", "''")) 
+ + def _build_filter_part(self, cls, filters, order_by=None, select=None): + """ + Build the filter part + """ + import types + query_parts = [] + + order_by_filtered = False + + if order_by: + if order_by[0] == "-": + order_by_method = "DESC" + order_by = order_by[1:] + else: + order_by_method = "ASC" + + if select: + if order_by and order_by in select: + order_by_filtered = True + query_parts.append("(%s)" % select) + + if isinstance(filters, six.string_types): + query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__) + if order_by in ["__id__", "itemName()"]: + query += " ORDER BY itemName() %s" % order_by_method + elif order_by is not None: + query += " ORDER BY `%s` %s" % (order_by, order_by_method) + return query + + for filter in filters: + filter_parts = [] + filter_props = filter[0] + if not isinstance(filter_props, list): + filter_props = [filter_props] + for filter_prop in filter_props: + (name, op) = filter_prop.strip().split(" ", 1) + value = filter[1] + property = cls.find_property(name) + if name == order_by: + order_by_filtered = True + if types.TypeType(value) == list: + filter_parts_sub = [] + for val in value: + val = self.encode_value(property, val) + if isinstance(val, list): + for v in val: + filter_parts_sub.append(self._build_filter(property, name, op, v)) + else: + filter_parts_sub.append(self._build_filter(property, name, op, val)) + filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub))) + else: + val = self.encode_value(property, value) + if isinstance(val, list): + for v in val: + filter_parts.append(self._build_filter(property, name, op, v)) + else: + filter_parts.append(self._build_filter(property, name, op, val)) + query_parts.append("(%s)" % (" or ".join(filter_parts))) + + + type_query = "(`__type__` = '%s'" % cls.__name__ + for subclass in self._get_all_decendents(cls).keys(): + type_query += " or `__type__` = '%s'" % subclass + type_query += ")" + query_parts.append(type_query) + + order_by_query = "" + + if order_by: + if not order_by_filtered: + query_parts.append("`%s` LIKE '%%'" % order_by) + if order_by in ["__id__", "itemName()"]: + order_by_query = " ORDER BY itemName() %s" % order_by_method + else: + order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method) + + if len(query_parts) > 0: + return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query) + else: + return "" + + + def _get_all_decendents(self, cls): + """Get all decendents for a given class""" + decendents = {} + for sc in cls.__sub_classes__: + decendents[sc.__name__] = sc + decendents.update(self._get_all_decendents(sc)) + return decendents + + def query_gql(self, query_string, *args, **kwds): + raise NotImplementedError("GQL queries not supported in SimpleDB") + + def save_object(self, obj, expected_value=None): + if not obj.id: + obj.id = str(uuid.uuid4()) + + attrs = {'__type__': obj.__class__.__name__, + '__module__': obj.__class__.__module__, + '__lineage__': obj.get_lineage()} + del_attrs = [] + for property in obj.properties(hidden=False): + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if value == []: + value = None + if value is None: + del_attrs.append(property.name) + continue + attrs[property.name] = value + if property.unique: + try: + args = {property.name: value} + obj2 = next(obj.find(**args)) + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" 
% property.name) + except(StopIteration): + pass + # Convert the Expected value to SDB format + if expected_value: + prop = obj.find_property(expected_value[0]) + v = expected_value[1] + if v is not None and not isinstance(v, bool): + v = self.encode_value(prop, v) + expected_value[1] = v + self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value) + if len(del_attrs) > 0: + self.domain.delete_attributes(obj.id, del_attrs) + return obj + + def delete_object(self, obj): + self.domain.delete_attributes(obj.id) + + def set_property(self, prop, obj, name, value): + setattr(obj, name, value) + value = prop.get_value_for_datastore(obj) + value = self.encode_value(prop, value) + if prop.unique: + try: + args = {prop.name: value} + obj2 = next(obj.find(**args)) + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" % prop.name) + except(StopIteration): + pass + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def get_property(self, prop, obj, name): + a = self.domain.get_attributes(obj.id, consistent_read=self.consistent) + + # try to get the attribute value from SDB + if name in a: + value = self.decode_value(prop, a[name]) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + return value + raise AttributeError('%s not found' % name) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) diff --git a/ext/boto/sdb/db/manager/xmlmanager.py b/ext/boto/sdb/db/manager/xmlmanager.py new file mode 100644 index 0000000000..f457347ad3 --- /dev/null +++ b/ext/boto/sdb/db/manager/xmlmanager.py @@ -0,0 +1,517 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+import boto +from boto.utils import find_class, Password +from boto.sdb.db.key import Key +from boto.sdb.db.model import Model +from boto.compat import six, encodebytes +from datetime import datetime +from xml.dom.minidom import getDOMImplementation, parse, parseString, Node + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + +class XMLConverter(object): + """ + Responsible for converting base Python types to format compatible with underlying + database. For SimpleDB, that means everything needs to be converted to a string + when stored in SimpleDB and from a string when retrieved. + + To convert a value, pass it to the encode or decode method. The encode method + will take a Python native value and convert to DB format. The decode method will + take a DB format value and convert it to Python native format. To find the appropriate + method to call, the generic encode/decode methods will look for the type-specific + method by searching for a method called "encode_" or "decode_". + """ + def __init__(self, manager): + self.manager = manager + self.type_map = { bool : (self.encode_bool, self.decode_bool), + int : (self.encode_int, self.decode_int), + Model : (self.encode_reference, self.decode_reference), + Key : (self.encode_reference, self.decode_reference), + Password : (self.encode_password, self.decode_password), + datetime : (self.encode_datetime, self.decode_datetime)} + if six.PY2: + self.type_map[long] = (self.encode_long, self.decode_long) + + def get_text_value(self, parent_node): + value = '' + for node in parent_node.childNodes: + if node.nodeType == node.TEXT_NODE: + value += node.data + return value + + def encode(self, item_type, value): + if item_type in self.type_map: + encode = self.type_map[item_type][0] + return encode(value) + return value + + def decode(self, item_type, value): + if item_type in self.type_map: + decode = self.type_map[item_type][1] + return decode(value) + else: + value = self.get_text_value(value) + return value + + def encode_prop(self, prop, value): + if isinstance(value, list): + if hasattr(prop, 'item_type'): + new_value = [] + for v in value: + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + new_value.append(self.encode(item_type, v)) + return new_value + else: + return value + else: + return self.encode(prop.data_type, value) + + def decode_prop(self, prop, value): + if prop.data_type == list: + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + values = [] + for item_node in value.getElementsByTagName('item'): + value = self.decode(item_type, item_node) + values.append(value) + return values + else: + return self.get_text_value(value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + return '%d' % value + + def decode_int(self, value): + value = self.get_text_value(value) + if value: + value = int(value) + else: + value = None + return value + + def encode_long(self, value): + value = long(value) + return '%d' % value + + def decode_long(self, value): + value = self.get_text_value(value) + return long(value) + + def encode_bool(self, value): + if value == True: + return 'true' + else: + return 'false' + + def decode_bool(self, value): + value = self.get_text_value(value) + if value.lower() == 'true': + return True + else: + return False + + def encode_datetime(self, value): + return value.strftime(ISO8601) + + def decode_datetime(self, value): + value = self.get_text_value(value) + try: 
+ return datetime.strptime(value, ISO8601) + except: + return None + + def encode_reference(self, value): + if isinstance(value, six.string_types): + return value + if value is None: + return '' + else: + val_node = self.manager.doc.createElement("object") + val_node.setAttribute('id', value.id) + val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__)) + return val_node + + def decode_reference(self, value): + if not value: + return None + try: + value = value.childNodes[0] + class_name = value.getAttribute("class") + id = value.getAttribute("id") + cls = find_class(class_name) + return cls.get_by_ids(id) + except: + return None + + def encode_password(self, value): + if value and len(value) > 0: + return str(value) + else: + return None + + def decode_password(self, value): + value = self.get_text_value(value) + return Password(value) + + +class XMLManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl): + self.cls = cls + if not db_name: + db_name = cls.__name__.lower() + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.s3 = None + self.converter = XMLConverter(self) + self.impl = getDOMImplementation() + self.doc = self.impl.createDocument(None, 'objects', None) + + self.connection = None + self.enable_ssl = enable_ssl + self.auth_header = None + if self.db_user: + base64string = encodebytes('%s:%s' % (self.db_user, self.db_passwd))[:-1] + authheader = "Basic %s" % base64string + self.auth_header = authheader + + def _connect(self): + if self.db_host: + if self.enable_ssl: + from httplib import HTTPSConnection as Connection + else: + from httplib import HTTPConnection as Connection + + self.connection = Connection(self.db_host, self.db_port) + + def _make_request(self, method, url, post_data=None, body=None): + """ + Make a request on this connection + """ + if not self.connection: + self._connect() + try: + self.connection.close() + except: + pass + self.connection.connect() + headers = {} + if self.auth_header: + headers["Authorization"] = self.auth_header + self.connection.request(method, url, body, headers) + resp = self.connection.getresponse() + return resp + + def new_doc(self): + return self.impl.createDocument(None, 'objects', None) + + def _object_lister(self, cls, doc): + for obj_node in doc.getElementsByTagName('object'): + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + if prop: + if hasattr(prop, 'item_type'): + value = self.get_list(prop_node, prop.item_type) + else: + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + yield obj + + def reset(self): + self._connect() + + def get_doc(self): + return self.doc + + def encode_value(self, prop, value): + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key) + return self.s3 + + def get_list(self, prop_node, item_type): + values = [] + try: + items_node = 
prop_node.getElementsByTagName('items')[0] + except: + return [] + for item_node in items_node.getElementsByTagName('item'): + value = self.converter.decode(item_type, item_node) + values.append(value) + return values + + def get_object_from_doc(self, cls, id, doc): + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value is not None: + try: + setattr(obj, prop.name, value) + except: + pass + return obj + + def get_props_from_doc(self, cls, id, doc): + """ + Pull out the properties from this document + Returns the class, the properties in a hash, and the id if provided as a tuple + :return: (cls, props, id) + """ + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + props = {} + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = cls.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value is not None: + props[prop.name] = value + return (cls, props, id) + + + def get_object(self, cls, id): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + url = "/%s/%s" % (self.db_name, id) + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self.get_object_from_doc(cls, id, doc) + + def query(self, cls, filters, limit=None, order_by=None): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + + from urllib import urlencode + + query = str(self._build_query(cls, filters, limit, order_by)) + if query: + url = "/%s?%s" % (self.db_name, urlencode({"query": query})) + else: + url = "/%s" % self.db_name + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self._object_lister(cls, doc) + + def _build_query(self, cls, filters, limit, order_by): + import types + if len(filters) > 4: + raise Exception('Too many filters, max is 4') + parts = [] + properties = cls.properties(hidden=False) + for filter, value in filters: + name, op = filter.strip().split() + found = False + for property in properties: + if property.name == name: + found = True + if types.TypeType(value) == list: + filter_parts = [] + for val in value: + val = self.encode_value(property, val) + filter_parts.append("'%s' %s '%s'" % (name, op, val)) + parts.append("[%s]" % " OR ".join(filter_parts)) + else: + value = self.encode_value(property, value) + parts.append("['%s' %s '%s']" % (name, op, value)) + if not found: + raise Exception('%s is not a valid field' % name) + if order_by: + if order_by.startswith("-"): + key = order_by[1:] + type = "desc" + else: + key = order_by + type = "asc" + parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type)) + return ' intersection '.join(parts) + + def query_gql(self, 
query_string, *args, **kwds): + raise NotImplementedError("GQL queries not supported in XML") + + def save_list(self, doc, items, prop_node): + items_node = doc.createElement('items') + prop_node.appendChild(items_node) + for item in items: + item_node = doc.createElement('item') + items_node.appendChild(item_node) + if isinstance(item, Node): + item_node.appendChild(item) + else: + text_node = doc.createTextNode(item) + item_node.appendChild(text_node) + + def save_object(self, obj, expected_value=None): + """ + Marshal the object and do a PUT + """ + doc = self.marshal_object(obj) + if obj.id: + url = "/%s/%s" % (self.db_name, obj.id) + else: + url = "/%s" % (self.db_name) + resp = self._make_request("PUT", url, body=doc.toxml()) + new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp)) + obj.id = new_obj.id + for prop in obj.properties(): + try: + propname = prop.name + except AttributeError: + propname = None + if propname: + value = getattr(new_obj, prop.name) + if value: + setattr(obj, prop.name, value) + return obj + + + def marshal_object(self, obj, doc=None): + if not doc: + doc = self.new_doc() + if not doc: + doc = self.doc + obj_node = doc.createElement('object') + + if obj.id: + obj_node.setAttribute('id', obj.id) + + obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__, + obj.__class__.__name__)) + root = doc.documentElement + root.appendChild(obj_node) + for property in obj.properties(hidden=False): + prop_node = doc.createElement('property') + prop_node.setAttribute('name', property.name) + prop_node.setAttribute('type', property.type_name) + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if isinstance(value, list): + self.save_list(doc, value, prop_node) + elif isinstance(value, Node): + prop_node.appendChild(value) + else: + text_node = doc.createTextNode(six.text_type(value).encode("ascii", "ignore")) + prop_node.appendChild(text_node) + obj_node.appendChild(prop_node) + + return doc + + def unmarshal_object(self, fp, cls=None, id=None): + if isinstance(fp, six.string_types): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_object_from_doc(cls, id, doc) + + def unmarshal_props(self, fp, cls=None, id=None): + """ + Same as unmarshalling an object, except it returns + from "get_props_from_doc" + """ + if isinstance(fp, six.string_types): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_props_from_doc(cls, id, doc) + + def delete_object(self, obj): + url = "/%s/%s" % (self.db_name, obj.id) + return self._make_request("DELETE", url) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name: value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name) + if name in a: + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) + + def set_property(self, prop, obj, name, value): + pass + + def get_property(self, prop, obj, name): + pass + + def load_object(self, obj): + if not obj._loaded: + obj = obj.get_by_id(obj.id) + obj._loaded = True + return obj diff --git a/ext/boto/sdb/db/model.py b/ext/boto/sdb/db/model.py new file mode 100644 index 0000000000..741ad43871 --- /dev/null +++ b/ext/boto/sdb/db/model.py @@ -0,0 +1,296 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, 
free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sdb.db.property import Property +from boto.sdb.db.key import Key +from boto.sdb.db.query import Query +import boto +from boto.compat import filter + +class ModelMeta(type): + "Metaclass for all Models" + + def __init__(cls, name, bases, dict): + super(ModelMeta, cls).__init__(name, bases, dict) + # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!) + cls.__sub_classes__ = [] + + # Do a delayed import to prevent possible circular import errors. + from boto.sdb.db.manager import get_manager + + try: + if filter(lambda b: issubclass(b, Model), bases): + for base in bases: + base.__sub_classes__.append(cls) + cls._manager = get_manager(cls) + # look for all of the Properties and set their names + for key in dict.keys(): + if isinstance(dict[key], Property): + property = dict[key] + property.__property_config__(cls, key) + prop_names = [] + props = cls.properties() + for prop in props: + if not prop.__class__.__name__.startswith('_'): + prop_names.append(prop.name) + setattr(cls, '_prop_names', prop_names) + except NameError: + # 'Model' isn't defined yet, meaning we're looking at our own + # Model class, defined below. 
+ pass + +class Model(object): + __metaclass__ = ModelMeta + __consistent__ = False # Consistent is set off by default + id = None + + @classmethod + def get_lineage(cls): + l = [c.__name__ for c in cls.mro()] + l.reverse() + return '.'.join(l) + + @classmethod + def kind(cls): + return cls.__name__ + + @classmethod + def _get_by_id(cls, id, manager=None): + if not manager: + manager = cls._manager + return manager.get_object(cls, id) + + @classmethod + def get_by_id(cls, ids=None, parent=None): + if isinstance(ids, list): + objs = [cls._get_by_id(id) for id in ids] + return objs + else: + return cls._get_by_id(ids) + + get_by_ids = get_by_id + + @classmethod + def get_by_key_name(cls, key_names, parent=None): + raise NotImplementedError("Key Names are not currently supported") + + @classmethod + def find(cls, limit=None, next_token=None, **params): + q = Query(cls, limit=limit, next_token=next_token) + for key, value in params.items(): + q.filter('%s =' % key, value) + return q + + @classmethod + def all(cls, limit=None, next_token=None): + return cls.find(limit=limit, next_token=next_token) + + @classmethod + def get_or_insert(key_name, **kw): + raise NotImplementedError("get_or_insert not currently supported") + + @classmethod + def properties(cls, hidden=True): + properties = [] + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if hidden or not prop.__class__.__name__.startswith('_'): + properties.append(prop) + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return properties + + @classmethod + def find_property(cls, prop_name): + property = None + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if not prop.__class__.__name__.startswith('_') and prop_name == prop.name: + property = prop + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return property + + @classmethod + def get_xmlmanager(cls): + if not hasattr(cls, '_xmlmanager'): + from boto.sdb.db.manager.xmlmanager import XMLManager + cls._xmlmanager = XMLManager(cls, None, None, None, + None, None, None, None, False) + return cls._xmlmanager + + @classmethod + def from_xml(cls, fp): + xmlmanager = cls.get_xmlmanager() + return xmlmanager.unmarshal_object(fp) + + def __init__(self, id=None, **kw): + self._loaded = False + # first try to initialize all properties to their default values + for prop in self.properties(hidden=False): + try: + setattr(self, prop.name, prop.default_value()) + except ValueError: + pass + if 'manager' in kw: + self._manager = kw['manager'] + self.id = id + for key in kw: + if key != 'manager': + # We don't want any errors populating up when loading an object, + # so if it fails we just revert to it's default value + try: + setattr(self, key, kw[key]) + except Exception as e: + boto.log.exception(e) + + def __repr__(self): + return '%s<%s>' % (self.__class__.__name__, self.id) + + def __str__(self): + return str(self.id) + + def __eq__(self, other): + return other and isinstance(other, Model) and self.id == other.id + + def _get_raw_item(self): + return self._manager.get_raw_item(self) + + def load(self): + if self.id and not self._loaded: + self._manager.load_object(self) + + def reload(self): + if self.id: + self._loaded = False + self._manager.load_object(self) + + def put(self, expected_value=None): + """ + Save this object as it is, with an optional expected value + + :param expected_value: Optional tuple of Attribute, and Value 
that + must be the same in order to save this object. If this + condition is not met, an SDBResponseError will be raised with a + Confict status code. + :type expected_value: tuple or list + :return: This object + :rtype: :class:`boto.sdb.db.model.Model` + """ + self._manager.save_object(self, expected_value) + return self + + save = put + + def put_attributes(self, attrs): + """ + Save just these few attributes, not the whole object + + :param attrs: Attributes to save, key->value dict + :type attrs: dict + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save" + for prop_name in attrs: + value = attrs[prop_name] + prop = self.find_property(prop_name) + assert(prop), "Property not found: %s" % prop_name + self._manager.set_property(prop, self, prop_name, value) + self.reload() + return self + + def delete_attributes(self, attrs): + """ + Delete just these attributes, not the whole object. + + :param attrs: Attributes to save, as a list of string names + :type attrs: list + :return: self + :rtype: :class:`boto.sdb.db.model.Model` + """ + assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete." + self._manager.domain.delete_attributes(self.id, attrs) + self.reload() + return self + + save_attributes = put_attributes + + def delete(self): + self._manager.delete_object(self) + + def key(self): + return Key(obj=self) + + def set_manager(self, manager): + self._manager = manager + + def to_dict(self): + props = {} + for prop in self.properties(hidden=False): + props[prop.name] = getattr(self, prop.name) + obj = {'properties': props, + 'id': self.id} + return {self.__class__.__name__: obj} + + def to_xml(self, doc=None): + xmlmanager = self.get_xmlmanager() + doc = xmlmanager.marshal_object(self, doc) + return doc + + @classmethod + def find_subclass(cls, name): + """Find a subclass with a given name""" + if name == cls.__name__: + return cls + for sc in cls.__sub_classes__: + r = sc.find_subclass(name) + if r is not None: + return r + +class Expando(Model): + + def __setattr__(self, name, value): + if name in self._prop_names: + object.__setattr__(self, name, value) + elif name.startswith('_'): + object.__setattr__(self, name, value) + elif name == 'id': + object.__setattr__(self, name, value) + else: + self._manager.set_key_value(self, name, value) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if not name.startswith('_'): + value = self._manager.get_key_value(self, name) + if value: + object.__setattr__(self, name, value) + return value + raise AttributeError diff --git a/ext/boto/sdb/db/property.py b/ext/boto/sdb/db/property.py new file mode 100644 index 0000000000..575aa8924d --- /dev/null +++ b/ext/boto/sdb/db/property.py @@ -0,0 +1,704 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import datetime +from boto.sdb.db.key import Key +from boto.utils import Password +from boto.sdb.db.query import Query +import re +import boto +import boto.s3.key +from boto.sdb.db.blob import Blob +from boto.compat import six, long_type + + +class Property(object): + + data_type = str + type_name = '' + name = '' + verbose_name = '' + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + self.verbose_name = verbose_name + self.name = name + self.default = default + self.required = required + self.validator = validator + self.choices = choices + if self.name: + self.slot_name = '_' + self.name + else: + self.slot_name = '_' + self.unique = unique + + def __get__(self, obj, objtype): + if obj: + obj.load() + return getattr(obj, self.slot_name) + else: + return None + + def __set__(self, obj, value): + self.validate(value) + + # Fire off any on_set functions + try: + if obj._loaded and hasattr(obj, "on_set_%s" % self.name): + fnc = getattr(obj, "on_set_%s" % self.name) + value = fnc(value) + except Exception: + boto.log.exception("Exception running on_set_%s" % self.name) + + setattr(obj, self.slot_name, value) + + def __property_config__(self, model_class, property_name): + self.model_class = model_class + self.name = property_name + self.slot_name = '_' + self.name + + def default_validator(self, value): + if isinstance(value, six.string_types) or value == self.default_value(): + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, %s.%s expecting %s, got %s' % (self.model_class.__name__, self.name, self.data_type, type(value))) + + def default_value(self): + return self.default + + def validate(self, value): + if self.required and value is None: + raise ValueError('%s is a required property' % self.name) + if self.choices and value and value not in self.choices: + raise ValueError('%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)) + if self.validator: + self.validator(value) + else: + self.default_validator(value) + return value + + def empty(self, value): + return not value + + def get_value_for_datastore(self, model_instance): + return getattr(model_instance, self.name) + + def make_value_from_datastore(self, value): + return value + + def get_choices(self): + if callable(self.choices): + return self.choices() + return self.choices + + +def validate_string(value): + if value is None: + return + elif isinstance(value, six.string_types): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting String, got %s' % type(value)) + + +class StringProperty(Property): + + type_name = 'String' + + def __init__(self, verbose_name=None, name=None, default='', + required=False, validator=validate_string, + choices=None, unique=False): + super(StringProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + + +class TextProperty(Property): + + type_name = 'Text' + + def __init__(self, 
verbose_name=None, name=None, default='', + required=False, validator=None, choices=None, + unique=False, max_length=None): + super(TextProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + self.max_length = max_length + + def validate(self, value): + value = super(TextProperty, self).validate(value) + if not isinstance(value, six.string_types): + raise TypeError('Expecting Text, got %s' % type(value)) + if self.max_length and len(value) > self.max_length: + raise ValueError('Length of value greater than maxlength %s' % self.max_length) + + +class PasswordProperty(StringProperty): + """ + + Hashed property whose original value can not be + retrieved, but still can be compared. + + Works by storing a hash of the original value instead + of the original value. Once that's done all that + can be retrieved is the hash. + + The comparison + + obj.password == 'foo' + + generates a hash of 'foo' and compares it to the + stored hash. + + Underlying data type for hashing, storing, and comparing + is boto.utils.Password. The default hash function is + defined there ( currently sha512 in most cases, md5 + where sha512 is not available ) + + It's unlikely you'll ever need to use a different hash + function, but if you do, you can control the behavior + in one of two ways: + + 1) Specifying hashfunc in PasswordProperty constructor + + import hashlib + + class MyModel(model): + password = PasswordProperty(hashfunc=hashlib.sha224) + + 2) Subclassing Password and PasswordProperty + + class SHA224Password(Password): + hashfunc=hashlib.sha224 + + class SHA224PasswordProperty(PasswordProperty): + data_type=MyPassword + type_name="MyPassword" + + class MyModel(Model): + password = SHA224PasswordProperty() + + """ + data_type = Password + type_name = 'Password' + + def __init__(self, verbose_name=None, name=None, default='', required=False, + validator=None, choices=None, unique=False, hashfunc=None): + + """ + The hashfunc parameter overrides the default hashfunc in boto.utils.Password. 
+ + The remaining parameters are passed through to StringProperty.__init__""" + + super(PasswordProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + self.hashfunc = hashfunc + + def make_value_from_datastore(self, value): + p = self.data_type(value, hashfunc=self.hashfunc) + return p + + def get_value_for_datastore(self, model_instance): + value = super(PasswordProperty, self).get_value_for_datastore(model_instance) + if value and len(value): + return str(value) + else: + return None + + def __set__(self, obj, value): + if not isinstance(value, self.data_type): + p = self.data_type(hashfunc=self.hashfunc) + p.set(value) + value = p + super(PasswordProperty, self).__set__(obj, value) + + def __get__(self, obj, objtype): + return self.data_type(super(PasswordProperty, self).__get__(obj, objtype), hashfunc=self.hashfunc) + + def validate(self, value): + value = super(PasswordProperty, self).validate(value) + if isinstance(value, self.data_type): + if len(value) > 1024: + raise ValueError('Length of value greater than maxlength') + else: + raise TypeError('Expecting %s, got %s' % (type(self.data_type), type(value))) + + +class BlobProperty(Property): + data_type = Blob + type_name = "blob" + + def __set__(self, obj, value): + if value != self.default_value(): + if not isinstance(value, Blob): + oldb = self.__get__(obj, type(obj)) + id = None + if oldb: + id = oldb.id + b = Blob(value=value, id=id) + value = b + super(BlobProperty, self).__set__(obj, value) + + +class S3KeyProperty(Property): + + data_type = boto.s3.key.Key + type_name = 'S3Key' + validate_regex = "^s3:\/\/([^\/]*)\/(.*)$" + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + super(S3KeyProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + + def validate(self, value): + value = super(S3KeyProperty, self).validate(value) + if value == self.default_value() or value == str(self.default_value()): + return self.default_value() + if isinstance(value, self.data_type): + return + match = re.match(self.validate_regex, value) + if match: + return + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def __get__(self, obj, objtype): + value = super(S3KeyProperty, self).__get__(obj, objtype) + if value: + if isinstance(value, self.data_type): + return value + match = re.match(self.validate_regex, value) + if match: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + k = bucket.get_key(match.group(2)) + if not k: + k = bucket.new_key(match.group(2)) + k.set_contents_from_string("") + return k + else: + return value + + def get_value_for_datastore(self, model_instance): + value = super(S3KeyProperty, self).get_value_for_datastore(model_instance) + if value: + return "s3://%s/%s" % (value.bucket.name, value.name) + else: + return None + + +class IntegerProperty(Property): + + data_type = int + type_name = 'Integer' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False, max=2147483647, min=-2147483648): + super(IntegerProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.max = max + self.min = min + + def validate(self, value): + value = int(value) + value = super(IntegerProperty, self).validate(value) + if value > self.max: + raise ValueError('Maximum value is %d' % self.max) + if 
value < self.min: + raise ValueError('Minimum value is %d' % self.min) + return value + + def empty(self, value): + return value is None + + def __set__(self, obj, value): + if value == "" or value is None: + value = 0 + return super(IntegerProperty, self).__set__(obj, value) + + +class LongProperty(Property): + + data_type = long_type + type_name = 'Long' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False): + super(LongProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = long_type(value) + value = super(LongProperty, self).validate(value) + min = -9223372036854775808 + max = 9223372036854775807 + if value > max: + raise ValueError('Maximum value is %d' % max) + if value < min: + raise ValueError('Minimum value is %d' % min) + return value + + def empty(self, value): + return value is None + + +class BooleanProperty(Property): + + data_type = bool + type_name = 'Boolean' + + def __init__(self, verbose_name=None, name=None, default=False, required=False, + validator=None, choices=None, unique=False): + super(BooleanProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def empty(self, value): + return value is None + + +class FloatProperty(Property): + + data_type = float + type_name = 'Float' + + def __init__(self, verbose_name=None, name=None, default=0.0, required=False, + validator=None, choices=None, unique=False): + super(FloatProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = float(value) + value = super(FloatProperty, self).validate(value) + return value + + def empty(self, value): + return value is None + + +class DateTimeProperty(Property): + """This class handles both the datetime.datetime object + And the datetime.date objects. 
It can return either one, + depending on the value stored in the database""" + + data_type = datetime.datetime + type_name = 'DateTime' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(DateTimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return super(DateTimeProperty, self).default_value() + + def validate(self, value): + if value is None: + return + if isinstance(value, datetime.date): + return value + return super(DateTimeProperty, self).validate(value) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + return super(DateTimeProperty, self).get_value_for_datastore(model_instance) + + def now(self): + return datetime.datetime.utcnow() + + +class DateProperty(Property): + + data_type = datetime.date + type_name = 'Date' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(DateProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return super(DateProperty, self).default_value() + + def validate(self, value): + value = super(DateProperty, self).validate(value) + if value is None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + val = super(DateProperty, self).get_value_for_datastore(model_instance) + if isinstance(val, datetime.datetime): + val = val.date() + return val + + def now(self): + return datetime.date.today() + + +class TimeProperty(Property): + data_type = datetime.time + type_name = 'Time' + + def __init__(self, verbose_name=None, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + super(TimeProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = super(TimeProperty, self).validate(value) + if value is None: + return + if not isinstance(value, self.data_type): + raise TypeError('Validation Error, expecting %s, got %s' % (self.data_type, type(value))) + + +class ReferenceProperty(Property): + + data_type = Key + type_name = 'Reference' + + def __init__(self, reference_class=None, collection_name=None, + verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): + super(ReferenceProperty, self).__init__(verbose_name, name, default, required, validator, choices, unique) + self.reference_class = reference_class + self.collection_name = collection_name + + def __get__(self, obj, objtype): + if obj: + value = getattr(obj, self.slot_name) + if value == self.default_value(): + return value + # If the value is still the UUID for the referenced object, we need to create + # the object now that is the attribute has actually been accessed. 
This lazy + # instantiation saves unnecessary roundtrips to SimpleDB + if isinstance(value, six.string_types): + value = self.reference_class(value) + setattr(obj, self.name, value) + return value + + def __set__(self, obj, value): + """Don't allow this object to be associated to itself + This causes bad things to happen""" + if value is not None and (obj.id == value or (hasattr(value, "id") and obj.id == value.id)): + raise ValueError("Can not associate an object with itself!") + return super(ReferenceProperty, self).__set__(obj, value) + + def __property_config__(self, model_class, property_name): + super(ReferenceProperty, self).__property_config__(model_class, property_name) + if self.collection_name is None: + self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name) + if hasattr(self.reference_class, self.collection_name): + raise ValueError('duplicate property: %s' % self.collection_name) + setattr(self.reference_class, self.collection_name, + _ReverseReferenceProperty(model_class, property_name, self.collection_name)) + + def check_uuid(self, value): + # This does a bit of hand waving to "type check" the string + t = value.split('-') + if len(t) != 5: + raise ValueError + + def check_instance(self, value): + try: + obj_lineage = value.get_lineage() + cls_lineage = self.reference_class.get_lineage() + if obj_lineage.startswith(cls_lineage): + return + raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage)) + except: + raise ValueError('%s is not a Model' % value) + + def validate(self, value): + if self.validator: + self.validator(value) + if self.required and value is None: + raise ValueError('%s is a required property' % self.name) + if value == self.default_value(): + return + if not isinstance(value, six.string_types): + self.check_instance(value) + + +class _ReverseReferenceProperty(Property): + data_type = Query + type_name = 'query' + + def __init__(self, model, prop, name): + self.__model = model + self.__property = prop + self.collection_name = prop + self.name = name + self.item_type = model + + def __get__(self, model_instance, model_class): + """Fetches collection of model instances of this collection property.""" + if model_instance is not None: + query = Query(self.__model) + if isinstance(self.__property, list): + props = [] + for prop in self.__property: + props.append("%s =" % prop) + return query.filter(props, model_instance) + else: + return query.filter(self.__property + ' =', model_instance) + else: + return self + + def __set__(self, model_instance, value): + """Not possible to set a new collection.""" + raise ValueError('Virtual property is read-only') + + +class CalculatedProperty(Property): + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, + calculated_type=int, unique=False, use_method=False): + super(CalculatedProperty, self).__init__(verbose_name, name, default, required, + validator, choices, unique) + self.calculated_type = calculated_type + self.use_method = use_method + + def __get__(self, obj, objtype): + value = self.default_value() + if obj: + try: + value = getattr(obj, self.slot_name) + if self.use_method: + value = value() + except AttributeError: + pass + return value + + def __set__(self, obj, value): + """Not possible to set a new AutoID.""" + pass + + def _set_direct(self, obj, value): + if not self.use_method: + setattr(obj, self.slot_name, value) + + def get_value_for_datastore(self, model_instance): + if self.calculated_type in [str, int, 
bool]: + value = self.__get__(model_instance, model_instance.__class__) + return value + else: + return None + + +class ListProperty(Property): + + data_type = list + type_name = 'List' + + def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = [] + self.item_type = item_type + super(ListProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + if self.validator: + self.validator(value) + if value is not None: + if not isinstance(value, list): + value = [value] + + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + + for item in value: + if not isinstance(item, item_type): + if item_type == six.integer_types: + raise ValueError('Items in the %s list must all be integers.' % self.name) + else: + raise ValueError('Items in the %s list must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return list(super(ListProperty, self).default_value()) + + def __set__(self, obj, value): + """Override the set method to allow them to set the property to an instance of the item_type instead of requiring a list to be passed in""" + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + if isinstance(value, item_type): + value = [value] + elif value is None: # Override to allow them to set this to "None" to remove everything + value = [] + return super(ListProperty, self).__set__(obj, value) + + +class MapProperty(Property): + + data_type = dict + type_name = 'Map' + + def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = {} + self.item_type = item_type + super(MapProperty, self).__init__(verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + value = super(MapProperty, self).validate(value) + if value is not None: + if not isinstance(value, dict): + raise ValueError('Value must of type dict') + + if self.item_type in six.integer_types: + item_type = six.integer_types + elif self.item_type in six.string_types: + item_type = six.string_types + else: + item_type = self.item_type + + for key in value: + if not isinstance(value[key], item_type): + if item_type == six.integer_types: + raise ValueError('Values in the %s Map must all be integers.' 
% self.name) + else: + raise ValueError('Values in the %s Map must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return {} diff --git a/ext/boto/sdb/db/query.py b/ext/boto/sdb/db/query.py new file mode 100644 index 0000000000..8945d4c0aa --- /dev/null +++ b/ext/boto/sdb/db/query.py @@ -0,0 +1,86 @@ +from boto.compat import six +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Query(object): + __local_iter__ = None + def __init__(self, model_class, limit=None, next_token=None, manager=None): + self.model_class = model_class + self.limit = limit + self.offset = 0 + if manager: + self.manager = manager + else: + self.manager = self.model_class._manager + self.filters = [] + self.select = None + self.sort_by = None + self.rs = None + self.next_token = next_token + + def __iter__(self): + return iter(self.manager.query(self)) + + def next(self): + if self.__local_iter__ is None: + self.__local_iter__ = self.__iter__() + return next(self.__local_iter__) + + def filter(self, property_operator, value): + self.filters.append((property_operator, value)) + return self + + def fetch(self, limit, offset=0): + """Not currently fully supported, but we can use this + to allow them to set a limit in a chainable method""" + self.limit = limit + self.offset = offset + return self + + def count(self, quick=True): + return self.manager.count(self.model_class, self.filters, quick, self.sort_by, self.select) + + def get_query(self): + return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by, self.select) + + def order(self, key): + self.sort_by = key + return self + + def to_xml(self, doc=None): + if not doc: + xmlmanager = self.model_class.get_xmlmanager() + doc = xmlmanager.new_doc() + for obj in self: + obj.to_xml(doc) + return doc + + def get_next_token(self): + if self.rs: + return self.rs.next_token + if self._next_token: + return self._next_token + return None + + def set_next_token(self, token): + self._next_token = token + + next_token = property(get_next_token, set_next_token) diff --git a/ext/boto/sdb/db/sequence.py b/ext/boto/sdb/db/sequence.py new file mode 100644 index 0000000000..a28798930c --- /dev/null +++ b/ext/boto/sdb/db/sequence.py @@ -0,0 +1,224 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# 
copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import SDBResponseError +from boto.compat import six + +class SequenceGenerator(object): + """Generic Sequence Generator object. This takes a single + string as the "sequence" and uses that to figure out + what the next value in a string is. For example + if you give "ABC" and pass in "A" it will give you "B", + and if you give it "C" it will give you "AA". + + If you set "rollover" to True in the above example, passing + in "C" would give you "A" again. + + The Sequence string can be a string or any iterable + that has the "index" function and is indexable. + """ + __name__ = "SequenceGenerator" + + def __init__(self, sequence_string, rollover=False): + """Create a new SequenceGenerator using the sequence_string + as how to generate the next item. + + :param sequence_string: The string or list that explains + how to generate the next item in the sequence + :type sequence_string: str, iterable + + :param rollover: Rollover instead of incrementing when + we hit the end of the sequence + :type rollover: bool + """ + self.sequence_string = sequence_string + self.sequence_length = len(sequence_string[0]) + self.rollover = rollover + self.last_item = sequence_string[-1] + self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string) + + def __call__(self, val, last=None): + """Get the next value in the sequence""" + # If they pass us in a string that's not at least + # the length of our sequence, then return the + # first element in our sequence + if val is None or len(val) < self.sequence_length: + return self.sequence_string[0] + last_value = val[-self.sequence_length:] + if (not self.rollover) and (last_value == self.last_item): + val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value)) + else: + val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value)) + return val + + def _inc(self, val): + """Increment a single value""" + assert(len(val) == self.sequence_length) + return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)] + + +# +# Simple Sequence Functions +# +def increment_by_one(cv=None, lv=None): + if cv is None: + return 0 + return cv + 1 + +def double(cv=None, lv=None): + if cv is None: + return 1 + return cv * 2 + +def fib(cv=1, lv=0): + """The Fibonacci sequence; this incrementer uses the + last value""" + if cv is None: + cv = 1 + if lv is None: + lv = 0 + return cv + lv + +increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + + +class Sequence(object): + """A simple Sequence using the new SDB "Consistent" 
features + Based largely off of the "Counter" example from Mitch Garnaat: + http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py""" + + def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None): + """Create a new Sequence, using an optional function to + increment to the next number; by default we just increment by one. + Every parameter here is optional; if you don't specify any options + then you'll get a new SequenceGenerator with a random ID stored in the + default domain that increments by one and uses the default botoweb + environment. + + :param id: Optional ID (name) for this counter + :type id: str + + :param domain_name: Optional domain name to use, by default we get this out of the + environment configuration + :type domain_name: str + + :param fnc: Optional function to use for the incrementation, by default we just increment by one. + There are several functions defined in this module. + Your function must accept "None" to get the initial value + :type fnc: function, str + + :param init_val: Initial value, by default this is the first element in your sequence, + but you can pass in any value, even a string if you pass in a function that uses + strings instead of ints to increment + """ + self._db = None + self._value = None + self.last_value = None + self.domain_name = domain_name + self.id = id + if init_val is None: + init_val = fnc(init_val) + + if self.id is None: + import uuid + self.id = str(uuid.uuid4()) + + self.item_type = type(fnc(None)) + self.timestamp = None + # Allow us to pass in a full name to a function + if isinstance(fnc, six.string_types): + from boto.utils import find_class + fnc = find_class(fnc) + self.fnc = fnc + + # Bootstrap the value last + if not self.val: + self.val = init_val + + def set(self, val): + """Set the value""" + import time + now = time.time() + expected_value = [] + new_val = {} + new_val['timestamp'] = now + if self._value is not None: + new_val['last_value'] = self._value + expected_value = ['current_value', str(self._value)] + new_val['current_value'] = val + try: + self.db.put_attributes(self.id, new_val, expected_value=expected_value) + self.timestamp = new_val['timestamp'] + except SDBResponseError as e: + if e.status == 409: + raise ValueError("Sequence out of sync") + else: + raise + + + def get(self): + """Get the value""" + val = self.db.get_attributes(self.id, consistent_read=True) + if val: + if 'timestamp' in val: + self.timestamp = val['timestamp'] + if 'current_value' in val: + self._value = self.item_type(val['current_value']) + if "last_value" in val and val['last_value'] is not None: + self.last_value = self.item_type(val['last_value']) + return self._value + + val = property(get, set) + + def __repr__(self): + return "%s('%s', '%s', '%s.%s', '%s')" % ( + self.__class__.__name__, + self.id, + self.domain_name, + self.fnc.__module__, self.fnc.__name__, + self.val) + + + def _connect(self): + """Connect to our domain""" + if not self._db: + import boto + sdb = boto.connect_sdb() + if not self.domain_name: + self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default")) + try: + self._db = sdb.get_domain(self.domain_name) + except SDBResponseError as e: + if e.status == 400: + self._db = sdb.create_domain(self.domain_name) + else: + raise + return self._db + + db = property(_connect) + + def next(self): + self.val = self.fnc(self.val, self.last_value) + return self.val + + def delete(self): + """Remove this sequence""" + 
self.db.delete_attributes(self.id) diff --git a/ext/boto/sdb/db/test_db.py b/ext/boto/sdb/db/test_db.py new file mode 100644 index 0000000000..ba2fb3cd5f --- /dev/null +++ b/ext/boto/sdb/db/test_db.py @@ -0,0 +1,231 @@ +import logging +import time +from datetime import datetime + +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty +from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty +from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty +from boto.exception import SDBPersistenceError + +logging.basicConfig() +log = logging.getLogger('test_db') +log.setLevel(logging.DEBUG) + +_objects = {} + +# +# This will eventually be moved to the boto.tests module and become a real unit test +# but for now it will live here. It shows examples of each of the Property types in +# use and tests the basic operations. +# +class TestBasic(Model): + + name = StringProperty() + size = IntegerProperty() + foo = BooleanProperty() + date = DateTimeProperty() + +class TestFloat(Model): + + name = StringProperty() + value = FloatProperty() + +class TestRequired(Model): + + req = StringProperty(required=True, default='foo') + +class TestReference(Model): + + ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs') + +class TestSubClass(TestBasic): + + answer = IntegerProperty() + +class TestPassword(Model): + password = PasswordProperty() + +class TestList(Model): + + name = StringProperty() + nums = ListProperty(int) + +class TestMap(Model): + + name = StringProperty() + map = MapProperty() + +class TestListReference(Model): + + name = StringProperty() + basics = ListProperty(TestBasic) + +class TestAutoNow(Model): + + create_date = DateTimeProperty(auto_now_add=True) + modified_date = DateTimeProperty(auto_now=True) + +class TestUnique(Model): + name = StringProperty(unique=True) + +def test_basic(): + global _objects + t = TestBasic() + t.name = 'simple' + t.size = -42 + t.foo = True + t.date = datetime.now() + log.debug('saving object') + t.put() + _objects['test_basic_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestBasic.get_by_id(t.id) + _objects['test_basic_tt'] = tt + assert tt.id == t.id + l = TestBasic.get_by_id([t.id]) + assert len(l) == 1 + assert l[0].id == t.id + assert t.size == tt.size + assert t.foo == tt.foo + assert t.name == tt.name + #assert t.date == tt.date + return t + +def test_float(): + global _objects + t = TestFloat() + t.name = 'float object' + t.value = 98.6 + log.debug('saving object') + t.save() + _objects['test_float_t'] = t + time.sleep(5) + log.debug('now try retrieving it') + tt = TestFloat.get_by_id(t.id) + _objects['test_float_tt'] = tt + assert tt.id == t.id + assert tt.name == t.name + assert tt.value == t.value + return t + +def test_required(): + global _objects + t = TestRequired() + _objects['test_required_t'] = t + t.put() + return t + +def test_reference(t=None): + global _objects + if not t: + t = test_basic() + tt = TestReference() + tt.ref = t + tt.put() + time.sleep(10) + tt = TestReference.get_by_id(tt.id) + _objects['test_reference_tt'] = tt + assert tt.ref.id == t.id + for o in t.refs: + log.debug(o) + +def test_subclass(): + global _objects + t = TestSubClass() + _objects['test_subclass_t'] = t + t.name = 'a subclass' + t.size = -489 + t.save() + +def test_password(): + global _objects + t = TestPassword() + _objects['test_password_t'] = t + t.password = "foo" + t.save() + time.sleep(5) + # Make sure 
it stored ok + tt = TestPassword.get_by_id(t.id) + _objects['test_password_tt'] = tt + #Testing password equality + assert tt.password == "foo" + #Testing password not stored as string + assert str(tt.password) != "foo" + +def test_list(): + global _objects + t = TestList() + _objects['test_list_t'] = t + t.name = 'a list of ints' + t.nums = [1, 2, 3, 4, 5] + t.put() + tt = TestList.get_by_id(t.id) + _objects['test_list_tt'] = tt + assert tt.name == t.name + for n in tt.nums: + assert isinstance(n, int) + +def test_list_reference(): + global _objects + t = TestBasic() + t.put() + _objects['test_list_ref_t'] = t + tt = TestListReference() + tt.name = "foo" + tt.basics = [t] + tt.put() + time.sleep(5) + _objects['test_list_ref_tt'] = tt + ttt = TestListReference.get_by_id(tt.id) + assert ttt.basics[0].id == t.id + +def test_unique(): + global _objects + t = TestUnique() + name = 'foo' + str(int(time.time())) + t.name = name + t.put() + _objects['test_unique_t'] = t + time.sleep(10) + tt = TestUnique() + _objects['test_unique_tt'] = tt + tt.name = name + try: + tt.put() + assert False + except(SDBPersistenceError): + pass + +def test_datetime(): + global _objects + t = TestAutoNow() + t.put() + _objects['test_datetime_t'] = t + time.sleep(5) + tt = TestAutoNow.get_by_id(t.id) + assert tt.create_date.timetuple() == t.create_date.timetuple() + +def test(): + log.info('test_basic') + t1 = test_basic() + log.info('test_required') + test_required() + log.info('test_reference') + test_reference(t1) + log.info('test_subclass') + test_subclass() + log.info('test_password') + test_password() + log.info('test_list') + test_list() + log.info('test_list_reference') + test_list_reference() + log.info("test_datetime") + test_datetime() + log.info('test_unique') + test_unique() + +if __name__ == "__main__": + test() diff --git a/ext/boto/sdb/domain.py b/ext/boto/sdb/domain.py new file mode 100644 index 0000000000..faed813326 --- /dev/null +++ b/ext/boto/sdb/domain.py @@ -0,0 +1,380 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
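The test module above doubles as usage documentation for the property layer. For orientation, here is a minimal sketch of the same API; it is illustrative only (not part of the vendored sources), it assumes configured AWS credentials and a reachable SimpleDB endpoint, and `Task` is a hypothetical model name:

    # Sketch of the boto.sdb.db property API exercised by test_db.py above.
    # Hypothetical model; requires AWS credentials and SimpleDB access.
    from boto.sdb.db.model import Model
    from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty

    class Task(Model):
        name = StringProperty(required=True)
        priority = IntegerProperty(default=0)
        tags = ListProperty(str)

    t = Task()
    t.name = 'encode video'
    t.tags = 'urgent'   # ListProperty.__set__ wraps a bare item into ['urgent']
    t.put()

    fetched = Task.get_by_id(t.id)
    assert fetched.tags == ['urgent']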
+from __future__ import print_function + +""" +Represents an SDB Domain +""" + +from boto.sdb.queryresultset import SelectResultSet +from boto.compat import six + +class Domain(object): + + def __init__(self, connection=None, name=None): + self.connection = connection + self.name = name + self._metadata = None + + def __repr__(self): + return 'Domain:%s' % self.name + + def __iter__(self): + return iter(self.select("SELECT * FROM `%s`" % self.name)) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DomainName': + self.name = value + else: + setattr(self, name, value) + + def get_metadata(self): + if not self._metadata: + self._metadata = self.connection.domain_metadata(self) + return self._metadata + + def put_attributes(self, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being stored. + + :type attributes: dict or dict-like object + :param attributes: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be + of the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute + "name" of this item has a value of "value". If it does, the put + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or non-existence + (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.put_attributes(self, item_name, attributes, + replace, expected_value) + + def batch_put_attributes(self, items, replace=True): + """ + Store attributes for multiple items. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attributes parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as additional values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_put_attributes(self, items, replace) + + def get_attributes(self, item_name, attribute_name=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being retrieved. + + :type attribute_name: string or list of strings + :param attribute_name: An attribute name or list of attribute names. This + parameter is optional. If not supplied, all attributes + will be retrieved for the item. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item mapping type containing the requested attribute name/values + """ + return self.connection.get_attributes(self, item_name, attribute_name, + consistent_read, item) + + def delete_attributes(self, item_name, attributes=None, + expected_values=None): + """ + Delete attributes from a given item. 
+ + :type item_name: string + :param item_name: The name of the item whose attributes are being deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which will cause + all values associated with that attribute name to be deleted or + a dict or Item containing the attribute names and keys and list + of values to delete as the value. If no value is supplied, + all attribute name/values for the item will be deleted. + + :type expected_values: list + :param expected_values: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. The list can be of + the form: + + * ['name', 'value'] + + In which case the call will first verify that the attribute "name" + of this item has a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed error will be + returned. The list can also be of the form: + + * ['name', True|False] + + which will simply check for the existence (True) or + non-existence (False) of the attribute. + + :rtype: bool + :return: True if successful + """ + return self.connection.delete_attributes(self, item_name, attributes, + expected_values) + + def batch_delete_attributes(self, items): + """ + Delete multiple items in this domain. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are either: + + * dictionaries of attribute names/values, exactly the + same as the attributes parameter of the scalar + put_attributes call. The attribute name/value pairs + will only be deleted if they match the name/value + pairs passed in. + * None which means that all attributes associated + with the item should be deleted. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_delete_attributes(self, items) + + def select(self, query='', next_token=None, consistent_read=False, max_items=None): + """ + Returns a set of Attributes for item names within domain_name that match the query. + The query must be expressed using the SELECT style syntax rather than the + original SimpleDB query language. + + :type query: string + :param query: The SimpleDB query to be performed. + + :rtype: iter + :return: An iterator containing the results. This is actually a generator + function that will iterate across all search results, not just the + first page. + """ + return SelectResultSet(self, query, max_items=max_items, next_token=next_token, + consistent_read=consistent_read) + + def get_item(self, item_name, consistent_read=False): + """ + Retrieves an item from the domain, along with all of its attributes. + + :param string item_name: The name of the item to retrieve. + :rtype: :class:`boto.sdb.item.Item` or ``None`` + :keyword bool consistent_read: When set to true, ensures that the most + recent data is returned. 
+ :return: The requested item, or ``None`` if there was no match found + """ + item = self.get_attributes(item_name, consistent_read=consistent_read) + if item: + item.domain = self + return item + else: + return None + + def new_item(self, item_name): + return self.connection.item_cls(self, item_name) + + def delete_item(self, item): + self.delete_attributes(item.name) + + def to_xml(self, f=None): + """Get this domain as an XML DOM Document + :param f: Optional File to dump directly to + :type f: File or Stream + + :return: File object where the XML has been dumped to + :rtype: file + """ + if not f: + from tempfile import TemporaryFile + f = TemporaryFile() + print('<?xml version="1.0" encoding="UTF-8"?>', file=f) + print('<Domain id="%s">' % self.name, file=f) + for item in self: + print('\t<Item id="%s">' % item.name, file=f) + for k in item: + print('\t\t<attribute id="%s">' % k, file=f) + values = item[k] + if not isinstance(values, list): + values = [values] + for value in values: + print('\t\t\t<value><![CDATA[%s]]></value>' % value, file=f) + print('\t\t</attribute>', file=f) + print('\t</Item>', file=f) + print('</Domain>', file=f) + f.flush() + f.seek(0) + return f + + + def from_xml(self, doc): + """Load this domain based on an XML document""" + import xml.sax + handler = DomainDumpParser(self) + xml.sax.parse(doc, handler) + return handler + + def delete(self): + """ + Delete this domain, and all items under it + """ + return self.connection.delete_domain(self) + + +class DomainMetaData(object): + + def __init__(self, domain=None): + self.domain = domain + self.item_count = None + self.item_names_size = None + self.attr_name_count = None + self.attr_names_size = None + self.attr_value_count = None + self.attr_values_size = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ItemCount': + self.item_count = int(value) + elif name == 'ItemNamesSizeBytes': + self.item_names_size = int(value) + elif name == 'AttributeNameCount': + self.attr_name_count = int(value) + elif name == 'AttributeNamesSizeBytes': + self.attr_names_size = int(value) + elif name == 'AttributeValueCount': + self.attr_value_count = int(value) + elif name == 'AttributeValuesSizeBytes': + self.attr_values_size = int(value) + elif name == 'Timestamp': + self.timestamp = value + else: + setattr(self, name, value) + +import sys +from xml.sax.handler import ContentHandler +class DomainDumpParser(ContentHandler): + """ + SAX parser for a domain that has been dumped + """ + + def __init__(self, domain): + self.uploader = UploaderThread(domain) + self.item_id = None + self.attrs = {} + self.attribute = None + self.value = "" + self.domain = domain + + def startElement(self, name, attrs): + if name == "Item": + self.item_id = attrs['id'] + self.attrs = {} + elif name == "attribute": + self.attribute = attrs['id'] + elif name == "value": + self.value = "" + + def characters(self, ch): + self.value += ch + + def endElement(self, name): + if name == "value": + if self.value and self.attribute: + value = self.value.strip() + attr_name = self.attribute.strip() + if attr_name in self.attrs: + self.attrs[attr_name].append(value) + else: + self.attrs[attr_name] = [value] + elif name == "Item": + self.uploader.items[self.item_id] = self.attrs + # Every 20 items we spawn off the uploader + if len(self.uploader.items) >= 20: + self.uploader.start() + self.uploader = UploaderThread(self.domain) + elif name == "Domain": + # If we're done, spawn off our last Uploader Thread + self.uploader.start() + +from threading import Thread +class UploaderThread(Thread): + """Uploader Thread""" + + def 
__init__(self, domain): + self.db = domain + self.items = {} + super(UploaderThread, self).__init__() + + def run(self): + try: + self.db.batch_put_attributes(self.items) + except: + print("Exception using batch put, trying regular put instead") + for item_name in self.items: + self.db.put_attributes(item_name, self.items[item_name]) + print(".", end=' ') + sys.stdout.flush() diff --git a/ext/boto/sdb/item.py b/ext/boto/sdb/item.py new file mode 100644 index 0000000000..e09a9d9a2c --- /dev/null +++ b/ext/boto/sdb/item.py @@ -0,0 +1,177 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import base64 + +class Item(dict): + """ + A ``dict`` sub-class that serves as an object representation of a + SimpleDB item. An item in SDB is similar to a row in a relational + database. Items belong to a :py:class:`Domain <boto.sdb.domain.Domain>`, + which is similar to a table in a relational database. + + The keys on instances of this object correspond to attributes that are + stored on the SDB item. + + .. tip:: While it is possible to instantiate this class directly, you may + want to use the convenience methods on :py:class:`boto.sdb.domain.Domain` + for that purpose. For example, :py:meth:`boto.sdb.domain.Domain.get_item`. + """ + def __init__(self, domain, name='', active=False): + """ + :type domain: :py:class:`boto.sdb.domain.Domain` + :param domain: The domain that this item belongs to. + + :param str name: The name of this item. 
This name will be used when + querying for items using methods like + :py:meth:`boto.sdb.domain.Domain.get_item` + """ + dict.__init__(self) + self.domain = domain + self.name = name + self.active = active + self.request_id = None + self.encoding = None + self.in_attribute = False + self.converter = self.domain.connection.converter + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + self.in_attribute = True + self.encoding = attrs.get('encoding', None) + return None + + def decode_value(self, value): + if self.encoding == 'base64': + self.encoding = None + return base64.decodestring(value) + else: + return value + + def endElement(self, name, value, connection): + if name == 'ItemName': + self.name = self.decode_value(value) + elif name == 'Name': + if self.in_attribute: + self.last_key = self.decode_value(value) + else: + self.name = self.decode_value(value) + elif name == 'Value': + if self.last_key in self: + if not isinstance(self[self.last_key], list): + self[self.last_key] = [self[self.last_key]] + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key].append(value) + else: + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key] = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'RequestId': + self.request_id = value + elif name == 'Attribute': + self.in_attribute = False + else: + setattr(self, name, value) + + def load(self): + """ + Loads or re-loads this item's attributes from SDB. + + .. warning:: + If you have changed attribute values on an Item instance, + this method will over-write the values if they are different in + SDB. For any local attributes that don't yet exist in SDB, + they will be safe. + """ + self.domain.get_attributes(self.name, item=self) + + def save(self, replace=True): + """ + Saves this item to SDB. + + :param bool replace: If ``True``, delete any attributes on the remote + SDB item that have a ``None`` value on this object. + """ + self.domain.put_attributes(self.name, self, replace) + # Delete any attributes set to "None" + if replace: + del_attrs = [] + for name in self: + if self[name] is None: + del_attrs.append(name) + if len(del_attrs) > 0: + self.domain.delete_attributes(self.name, del_attrs) + + def add_value(self, key, value): + """ + Helps set or add to attributes on this item. If you are adding a new + attribute that has yet to be set, it will simply create an attribute + named ``key`` with your given ``value`` as its value. If you are + adding a value to an existing attribute, this method will convert the + attribute to a list (if it isn't already) and append your new value + to said list. + + For clarification, consider the following interactive session: + + .. code-block:: python + + >>> item = some_domain.get_item('some_item') + >>> item.has_key('some_attr') + False + >>> item.add_value('some_attr', 1) + >>> item['some_attr'] + 1 + >>> item.add_value('some_attr', 2) + >>> item['some_attr'] + [1, 2] + + :param str key: The attribute to add a value to. + :param object value: The value to set or append to the attribute. + """ + if key in self: + # We already have this key on the item. + if not isinstance(self[key], list): + # The key isn't already a list, take its current value and + # convert it to a list with the only member being the + # current value. + self[key] = [self[key]] + # Add the new value to the list. 
+ self[key].append(value) + else: + # This is a new attribute, just set it. + self[key] = value + + def delete(self): + """ + Deletes this item in SDB. + + .. note:: This local Python object remains in its current state + after deletion, this only deletes the remote item in SDB. + """ + self.domain.delete_item(self) diff --git a/ext/boto/sdb/queryresultset.py b/ext/boto/sdb/queryresultset.py new file mode 100644 index 0000000000..54f35238b6 --- /dev/null +++ b/ext/boto/sdb/queryresultset.py @@ -0,0 +1,93 @@ +from boto.compat import six +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +def query_lister(domain, query='', max_items=None, attr_names=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.query_with_attributes(domain, query, attr_names, + next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token is not None + +class QueryResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, attr_names=None): + self.max_items = max_items + self.domain = domain + self.query = query + self.attr_names = attr_names + + def __iter__(self): + return query_lister(self.domain, self.query, self.max_items, self.attr_names) + +def select_lister(domain, query='', max_items=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.select(domain, query, next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token is not None + +class SelectResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, + next_token=None, consistent_read=False): + self.domain = domain + self.query = query + self.consistent_read = consistent_read + self.max_items = max_items + self.next_token = next_token + + def __iter__(self): + more_results = True + num_results = 0 + while more_results: + rs = self.domain.connection.select(self.domain, self.query, + next_token=self.next_token, + consistent_read=self.consistent_read) + for item in rs: + if self.max_items and num_results >= self.max_items: + raise StopIteration + yield item + num_results += 1 + self.next_token = rs.next_token + if self.max_items and num_results >= self.max_items: + raise 
StopIteration + more_results = self.next_token is not None + + def next(self): + return next(self.__iter__()) diff --git a/ext/boto/sdb/regioninfo.py b/ext/boto/sdb/regioninfo.py new file mode 100644 index 0000000000..cb0211e161 --- /dev/null +++ b/ext/boto/sdb/regioninfo.py @@ -0,0 +1,33 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo + +class SDBRegionInfo(RegionInfo): + + def __init__(self, connection=None, name=None, endpoint=None, + connection_cls=None): + from boto.sdb.connection import SDBConnection + super(SDBRegionInfo, self).__init__(connection, name, endpoint, + SDBConnection) diff --git a/ext/boto/services/__init__.py b/ext/boto/services/__init__.py new file mode 100644 index 0000000000..449bd162a8 --- /dev/null +++ b/ext/boto/services/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
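For context before the services layer, a short sketch of how the pieces above fit together: `Domain.select()` hands a query to a `SelectResultSet`, which follows `next_token` across result pages transparently. The domain name and query below are hypothetical, and configured AWS credentials are assumed:

    # Sketch: iterating a SimpleDB select via SelectResultSet pagination.
    # 'my_domain' and the query are hypothetical; requires AWS credentials.
    import boto

    sdb = boto.connect_sdb()
    domain = sdb.get_domain('my_domain')
    query = "select * from `my_domain` where priority > '0'"
    for item in domain.select(query, consistent_read=True, max_items=50):
        print(item.name, dict(item))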
+# + + diff --git a/ext/boto/services/bs.py b/ext/boto/services/bs.py new file mode 100644 index 0000000000..396c483975 --- /dev/null +++ b/ext/boto/services/bs.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from optparse import OptionParser +from boto.services.servicedef import ServiceDef +from boto.services.submit import Submitter +from boto.services.result import ResultProcessor +import boto +import sys, os +from boto.compat import StringIO + +class BS(object): + + Usage = "usage: %prog [options] config_file command" + + Commands = {'reset' : 'Clear input queue and output bucket', + 'submit' : 'Submit local files to the service', + 'start' : 'Start the service', + 'status' : 'Report on the status of the service buckets and queues', + 'retrieve' : 'Retrieve output generated by a batch', + 'batches' : 'List all batches stored in current output_domain'} + + def __init__(self): + self.service_name = None + self.parser = OptionParser(usage=self.Usage) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option("-a", "--access-key", action="store", type="string", + help="your AWS Access Key") + self.parser.add_option("-s", "--secret-key", action="store", type="string", + help="your AWS Secret Access Key") + self.parser.add_option("-p", "--path", action="store", type="string", dest="path", + help="the path to local directory for submit and retrieve") + self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair", + help="the SSH keypair used with launched instance(s)") + self.parser.add_option("-l", "--leave", action="store_true", dest="leave", + help="leave the files (don't retrieve) files during retrieve command") + self.parser.set_defaults(leave=False) + self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances", + help="the number of launched instance(s)") + self.parser.set_defaults(num_instances=1) + self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore", + help="directories that should be ignored by submit command") + self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch", + help="batch identifier required by the retrieve command") + + def print_command_help(self): + print('\nCommands:') + for key in self.Commands.keys(): + print(' %s\t\t%s' % (key, 
self.Commands[key])) + + def do_reset(self): + iq = self.sd.get_obj('input_queue') + if iq: + print('clearing out input queue') + i = 0 + m = iq.read() + while m: + i += 1 + iq.delete_message(m) + m = iq.read() + print('deleted %d messages' % i) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + print('delete generated files in output bucket') + i = 0 + for k in ob: + i += 1 + k.delete() + print('deleted %d keys' % i) + + def do_submit(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + s = Submitter(self.sd) + t = s.submit_path(self.options.path, None, self.options.ignore, None, + None, True, self.options.path) + print('A total of %d files were submitted' % t[1]) + print('Batch Identifier: %s' % t[0]) + + def do_start(self): + ami_id = self.sd.get('ami_id') + instance_type = self.sd.get('instance_type', 'm1.small') + security_group = self.sd.get('security_group', 'default') + if not ami_id: + self.parser.error('ami_id option is required when starting the service') + ec2 = boto.connect_ec2() + if not self.sd.has_section('Credentials'): + self.sd.add_section('Credentials') + self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id) + self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + s = StringIO() + self.sd.write(s) + rs = ec2.get_all_images([ami_id]) + img = rs[0] + r = img.run(user_data=s.getvalue(), key_name=self.options.keypair, + max_count=self.options.num_instances, + instance_type=instance_type, + security_groups=[security_group]) + print('Starting AMI: %s' % ami_id) + print('Reservation %s contains the following instances:' % r.id) + for i in r.instances: + print('\t%s' % i.id) + + def do_status(self): + iq = self.sd.get_obj('input_queue') + if iq: + print('The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count())) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + total = 0 + for k in ob: + total += 1 + print('The output_bucket (%s) contains %d keys' % (ob.name, total)) + + def do_retrieve(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + if not self.options.batch: + self.parser.error('batch identifier is required for retrieve command') + s = ResultProcessor(self.options.batch, self.sd) + s.get_results(self.options.path, get_file=(not self.options.leave)) + + def do_batches(self): + d = self.sd.get_obj('output_domain') + if d: + print('Available Batches:') + rs = d.query("['type'='Batch']") + for item in rs: + print(' %s' % item.name) + else: + self.parser.error('No output_domain specified for service') + + def main(self): + self.options, self.args = self.parser.parse_args() + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) != 2: + self.parser.error("config_file and command are required") + self.config_file = self.args[0] + self.sd = ServiceDef(self.config_file) + self.command = self.args[1] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + +if __name__ == "__main__": + bs = BS() + bs.main() diff --git a/ext/boto/services/message.py 
b/ext/boto/services/message.py new file mode 100644 index 0000000000..31f37019fc --- /dev/null +++ b/ext/boto/services/message.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sqs.message import MHMessage +from boto.utils import get_ts +from socket import gethostname +import os, mimetypes, time + +class ServiceMessage(MHMessage): + + def for_key(self, key, params=None, bucket_name=None): + if params: + self.update(params) + if key.path: + t = os.path.split(key.path) + self['OriginalLocation'] = t[0] + self['OriginalFileName'] = t[1] + mime_type = mimetypes.guess_type(t[1])[0] + if mime_type is None: + mime_type = 'application/octet-stream' + self['Content-Type'] = mime_type + s = os.stat(key.path) + t = time.gmtime(s[7]) + self['FileAccessedDate'] = get_ts(t) + t = time.gmtime(s[8]) + self['FileModifiedDate'] = get_ts(t) + t = time.gmtime(s[9]) + self['FileCreateDate'] = get_ts(t) + else: + self['OriginalFileName'] = key.name + self['OriginalLocation'] = key.bucket.name + self['ContentType'] = key.content_type + self['Host'] = gethostname() + if bucket_name: + self['Bucket'] = bucket_name + else: + self['Bucket'] = key.bucket.name + self['InputKey'] = key.name + self['Size'] = key.size + diff --git a/ext/boto/services/result.py b/ext/boto/services/result.py new file mode 100644 index 0000000000..879934323b --- /dev/null +++ b/ext/boto/services/result.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import os +from datetime import datetime, timedelta +from boto.utils import parse_ts +import boto + +class ResultProcessor(object): + + LogFileName = 'log.csv' + + def __init__(self, batch_name, sd, mimetype_files=None): + self.sd = sd + self.batch = batch_name + self.log_fp = None + self.num_files = 0 + self.total_time = 0 + self.min_time = timedelta.max + self.max_time = timedelta.min + self.earliest_time = datetime.max + self.latest_time = datetime.min + self.queue = self.sd.get_obj('output_queue') + self.domain = self.sd.get_obj('output_domain') + + def calculate_stats(self, msg): + start_time = parse_ts(msg['Service-Read']) + end_time = parse_ts(msg['Service-Write']) + elapsed_time = end_time - start_time + if elapsed_time > self.max_time: + self.max_time = elapsed_time + if elapsed_time < self.min_time: + self.min_time = elapsed_time + self.total_time += elapsed_time.seconds + if start_time < self.earliest_time: + self.earliest_time = start_time + if end_time > self.latest_time: + self.latest_time = end_time + + def log_message(self, msg, path): + keys = sorted(msg.keys()) + if not self.log_fp: + self.log_fp = open(os.path.join(path, self.LogFileName), 'a') + line = ','.join(keys) + self.log_fp.write(line+'\n') + values = [] + for key in keys: + value = msg[key] + if value.find(',') > 0: + value = '"%s"' % value + values.append(value) + line = ','.join(values) + self.log_fp.write(line+'\n') + + def process_record(self, record, path, get_file=True): + self.log_message(record, path) + self.calculate_stats(record) + outputs = record['OutputKey'].split(',') + if 'OutputBucket' in record: + bucket = boto.lookup('s3', record['OutputBucket']) + else: + bucket = boto.lookup('s3', record['Bucket']) + for output in outputs: + if get_file: + key_name = output.split(';')[0] + key = bucket.lookup(key_name) + file_name = os.path.join(path, key_name) + print('retrieving file: %s to %s' % (key_name, file_name)) + key.get_contents_to_filename(file_name) + self.num_files += 1 + + def get_results_from_queue(self, path, get_file=True, delete_msg=True): + m = self.queue.read() + while m: + if 'Batch' in m and m['Batch'] == self.batch: + self.process_record(m, path, get_file) + if delete_msg: + self.queue.delete_message(m) + m = self.queue.read() + + def get_results_from_domain(self, path, get_file=True): + rs = self.domain.query("['Batch'='%s']" % self.batch) + for item in rs: + self.process_record(item, path, get_file) + + def get_results_from_bucket(self, path): + bucket = self.sd.get_obj('output_bucket') + if bucket: + print('No output queue or domain, just retrieving files from output_bucket') + for key in bucket: + file_name = os.path.join(path, key.name) + print('retrieving file: %s to %s' % (key, file_name)) + key.get_contents_to_filename(file_name) + self.num_files += 1 + + def get_results(self, path, get_file=True, delete_msg=True): + if not os.path.isdir(path): + os.mkdir(path) + if self.queue: + self.get_results_from_queue(path, get_file) + elif self.domain: + self.get_results_from_domain(path, get_file) + else: + self.get_results_from_bucket(path) + if self.log_fp: + self.log_fp.close() + print('%d results successfully retrieved.' 
% self.num_files) + if self.num_files > 0: + self.avg_time = float(self.total_time)/self.num_files + print('Minimum Processing Time: %d' % self.min_time.seconds) + print('Maximum Processing Time: %d' % self.max_time.seconds) + print('Average Processing Time: %f' % self.avg_time) + self.elapsed_time = self.latest_time-self.earliest_time + print('Elapsed Time: %d' % self.elapsed_time.seconds) + tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files) + print('Throughput: %f transactions / minute' % tput) + diff --git a/ext/boto/services/service.py b/ext/boto/services/service.py new file mode 100644 index 0000000000..e1a04c8ec2 --- /dev/null +++ b/ext/boto/services/service.py @@ -0,0 +1,161 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
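A short sketch of driving `ResultProcessor` by hand; the config file name and batch id below are hypothetical, and `get_results()` downloads each output file before printing the timing statistics computed above:

    # Sketch: retrieving the output of a finished batch.
    # 'my-service.cfg' and 'batch-1234' are hypothetical.
    from boto.services.servicedef import ServiceDef
    from boto.services.result import ResultProcessor

    sd = ServiceDef('my-service.cfg')
    processor = ResultProcessor('batch-1234', sd)
    processor.get_results('output/', get_file=True)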
+ +import boto +from boto.services.message import ServiceMessage +from boto.services.servicedef import ServiceDef +from boto.pyami.scriptbase import ScriptBase +from boto.utils import get_ts +import time +import os +import mimetypes + + +class Service(ScriptBase): + + # Time required to process a transaction + ProcessingTime = 60 + + def __init__(self, config_file=None, mimetype_files=None): + super(Service, self).__init__(config_file) + self.name = self.__class__.__name__ + self.working_dir = boto.config.get('Pyami', 'working_dir') + self.sd = ServiceDef(config_file) + self.retry_count = self.sd.getint('retry_count', 5) + self.loop_delay = self.sd.getint('loop_delay', 30) + self.processing_time = self.sd.getint('processing_time', 60) + self.input_queue = self.sd.get_obj('input_queue') + self.output_queue = self.sd.get_obj('output_queue') + self.output_domain = self.sd.get_obj('output_domain') + if mimetype_files: + mimetypes.init(mimetype_files) + + def split_key(self, key): + if key.find(';') < 0: + t = (key, '') + else: + key, type = key.split(';') + label, mtype = type.split('=') + t = (key, mtype) + return t + + def read_message(self): + boto.log.info('read_message') + message = self.input_queue.read(self.processing_time) + if message: + boto.log.info(message.get_body()) + key = 'Service-Read' + message[key] = get_ts() + return message + + # retrieve the source file from S3 + def get_file(self, message): + bucket_name = message['Bucket'] + key_name = message['InputKey'] + file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file')) + boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.get_contents_to_filename(file_name) + return file_name + + # process source file, return list of output files + def process_file(self, in_file_name, msg): + return [] + + # store result file in S3 + def put_file(self, bucket_name, file_path, key_name=None): + boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.set_contents_from_filename(file_path) + return key + + def save_results(self, results, input_message, output_message): + output_keys = [] + for file, type in results: + if 'OutputBucket' in input_message: + output_bucket = input_message['OutputBucket'] + else: + output_bucket = input_message['Bucket'] + key_name = os.path.split(file)[1] + key = self.put_file(output_bucket, file, key_name) + output_keys.append('%s;type=%s' % (key.name, type)) + output_message['OutputKey'] = ','.join(output_keys) + + # write message to each output queue + def write_message(self, message): + message['Service-Write'] = get_ts() + message['Server'] = self.name + if 'HOSTNAME' in os.environ: + message['Host'] = os.environ['HOSTNAME'] + else: + message['Host'] = 'unknown' + message['Instance-ID'] = self.instance_id + if self.output_queue: + boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id) + self.output_queue.write(message) + if self.output_domain: + boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name) + item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']]) + self.output_domain.put_attributes(item_name, message) + + # delete message from input queue + def delete_message(self, message): + boto.log.info('deleting message from %s' % self.input_queue.id) + 
self.input_queue.delete_message(message) + + # to clean up any files, etc. after each iteration + def cleanup(self): + pass + + def shutdown(self): + on_completion = self.sd.get('on_completion', 'shutdown') + if on_completion == 'shutdown': + if self.instance_id: + time.sleep(60) + c = boto.connect_ec2() + c.terminate_instances([self.instance_id]) + + def main(self, notify=False): + self.notify('Service: %s Starting' % self.name) + empty_reads = 0 + while self.retry_count < 0 or empty_reads < self.retry_count: + try: + input_message = self.read_message() + if input_message: + empty_reads = 0 + output_message = ServiceMessage(None, input_message.get_body()) + input_file = self.get_file(input_message) + results = self.process_file(input_file, output_message) + self.save_results(results, input_message, output_message) + self.write_message(output_message) + self.delete_message(input_message) + self.cleanup() + else: + empty_reads += 1 + time.sleep(self.loop_delay) + except Exception: + boto.log.exception('Service Failed') + empty_reads += 1 + self.notify('Service: %s Shutting Down' % self.name) + self.shutdown() + diff --git a/ext/boto/services/servicedef.py b/ext/boto/services/servicedef.py new file mode 100644 index 0000000000..a43b3f342a --- /dev/null +++ b/ext/boto/services/servicedef.py @@ -0,0 +1,91 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
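A sketch of what a concrete service looks like: a subclass overrides `process_file()` and returns (path, mimetype) pairs for `save_results()` to upload. `UpperCase` is a hypothetical example; `SonOfMMM` further below is the real subclass shipped with boto:

    # Sketch: a minimal hypothetical Service subclass.
    from boto.services.service import Service

    class UpperCase(Service):
        """Uppercases the contents of each input file."""
        def process_file(self, in_file_name, msg):
            out_file_name = in_file_name + '.out'
            with open(in_file_name) as src, open(out_file_name, 'w') as dst:
                dst.write(src.read().upper())
            return [(out_file_name, 'text/plain')]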
+ +from boto.pyami.config import Config +from boto.services.message import ServiceMessage +import boto + +class ServiceDef(Config): + + def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None): + super(ServiceDef, self).__init__(config_file) + self.aws_access_key_id = aws_access_key_id + self.aws_secret_access_key = aws_secret_access_key + script = Config.get(self, 'Pyami', 'scripts') + if script: + self.name = script.split('.')[-1] + else: + self.name = None + + + def get(self, name, default=None): + return super(ServiceDef, self).get(self.name, name, default) + + def has_option(self, option): + return super(ServiceDef, self).has_option(self.name, option) + + def getint(self, option, default=0): + try: + val = super(ServiceDef, self).get(self.name, option) + val = int(val) + except: + val = int(default) + return val + + def getbool(self, option, default=False): + try: + val = super(ServiceDef, self).get(self.name, option) + if val.lower() == 'true': + val = True + else: + val = False + except: + val = default + return val + + def get_obj(self, name): + """ + Returns the AWS object associated with a given option. + + The heuristics used are a bit lame. If the option name contains + the word 'bucket' it is assumed to be an S3 bucket, if the name + contains the word 'queue' it is assumed to be an SQS queue and + if it contains the word 'domain' it is assumed to be a SimpleDB + domain. If the option name specified does not exist in the + config file or if the AWS object cannot be retrieved this + returns None. + """ + val = self.get(name) + if not val: + return None + if name.find('queue') >= 0: + obj = boto.lookup('sqs', val) + if obj: + obj.set_message_class(ServiceMessage) + elif name.find('bucket') >= 0: + obj = boto.lookup('s3', val) + elif name.find('domain') >= 0: + obj = boto.lookup('sdb', val) + else: + obj = None + return obj + + diff --git a/ext/boto/services/sonofmmm.cfg b/ext/boto/services/sonofmmm.cfg new file mode 100644 index 0000000000..d70d3794d5 --- /dev/null +++ b/ext/boto/services/sonofmmm.cfg @@ -0,0 +1,43 @@ +# +# Your AWS Credentials +# You only need to supply these in this file if you are not using +# the boto tools to start your service +# +#[Credentials] +#aws_access_key_id = +#aws_secret_access_key = + +# +# Fill out this section if you want emails from the service +# when it starts and stops +# +#[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +[Pyami] +scripts = boto.services.sonofmmm.SonOfMMM + +[SonOfMMM] +# id of the AMI to be launched +ami_id = ami-dc799cb5 +# number of times service will read an empty queue before exiting +# a negative value will cause the service to run forever +retry_count = 5 +# seconds to wait after empty queue read before reading again +loop_delay = 10 +# average time it takes to process a transaction +# controls invisibility timeout of messages +processing_time = 60 +ffmpeg_args = -y -i %%s -f mov -r 29.97 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -ar 48000 -ab 19200 -s 320x240 -vcodec mpeg4 -acodec libfaac %%s +output_mimetype = video/quicktime +output_ext = .mov +input_bucket = +output_bucket = +output_domain = +output_queue = +input_queue = + diff --git a/ext/boto/services/sonofmmm.py b/ext/boto/services/sonofmmm.py new file mode 100644 index 0000000000..3ef60838b8 --- /dev/null +++ b/ext/boto/services/sonofmmm.py @@ -0,0 +1,81 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any 
person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.services.service import Service +from boto.services.message import ServiceMessage +import os +import mimetypes + +class SonOfMMM(Service): + + def __init__(self, config_file=None): + super(SonOfMMM, self).__init__(config_file) + self.log_file = '%s.log' % self.instance_id + self.log_path = os.path.join(self.working_dir, self.log_file) + boto.set_file_logger(self.name, self.log_path) + if self.sd.has_option('ffmpeg_args'): + self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args') + else: + self.command = '/usr/local/bin/ffmpeg -y -i %s %s' + self.output_mimetype = self.sd.get('output_mimetype') + if self.sd.has_option('output_ext'): + self.output_ext = self.sd.get('output_ext') + else: + self.output_ext = mimetypes.guess_extension(self.output_mimetype) + self.output_bucket = self.sd.get_obj('output_bucket') + self.input_bucket = self.sd.get_obj('input_bucket') + # check to see if there are any messages queue + # if not, create messages for all files in input_bucket + m = self.input_queue.read(1) + if not m: + self.queue_files() + + def queue_files(self): + boto.log.info('Queueing files from %s' % self.input_bucket.name) + for key in self.input_bucket: + boto.log.info('Queueing %s' % key.name) + m = ServiceMessage() + if self.output_bucket: + d = {'OutputBucket' : self.output_bucket.name} + else: + d = None + m.for_key(key, d) + self.input_queue.write(m) + + def process_file(self, in_file_name, msg): + base, ext = os.path.splitext(in_file_name) + out_file_name = os.path.join(self.working_dir, + base+self.output_ext) + command = self.command % (in_file_name, out_file_name) + boto.log.info('running:\n%s' % command) + status = self.run(command) + if status == 0: + return [(out_file_name, self.output_mimetype)] + else: + return [] + + def shutdown(self): + if os.path.isfile(self.log_path): + if self.output_bucket: + key = self.output_bucket.new_key(self.log_file) + key.set_contents_from_filename(self.log_path) + super(SonOfMMM, self).shutdown() diff --git a/ext/boto/services/submit.py b/ext/boto/services/submit.py new file mode 100644 index 0000000000..69be236adb --- /dev/null +++ b/ext/boto/services/submit.py @@ -0,0 +1,87 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# 
tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import time +import os + + +class Submitter(object): + + def __init__(self, sd): + self.sd = sd + self.input_bucket = self.sd.get_obj('input_bucket') + self.output_bucket = self.sd.get_obj('output_bucket') + self.output_domain = self.sd.get_obj('output_domain') + self.queue = self.sd.get_obj('input_queue') + + def get_key_name(self, fullpath, prefix): + key_name = fullpath[len(prefix):] + l = key_name.split(os.sep) + return '/'.join(l) + + def write_message(self, key, metadata): + if self.queue: + m = self.queue.new_message() + m.for_key(key, metadata) + if self.output_bucket: + m['OutputBucket'] = self.output_bucket.name + self.queue.write(m) + + def submit_file(self, path, metadata=None, cb=None, num_cb=0, prefix='/'): + if not metadata: + metadata = {} + key_name = self.get_key_name(path, prefix) + k = self.input_bucket.new_key(key_name) + k.update_metadata(metadata) + k.set_contents_from_filename(path, replace=False, cb=cb, num_cb=num_cb) + self.write_message(k, metadata) + + def submit_path(self, path, tags=None, ignore_dirs=None, cb=None, num_cb=0, status=False, prefix='/'): + path = os.path.expanduser(path) + path = os.path.expandvars(path) + path = os.path.abspath(path) + total = 0 + metadata = {} + if tags: + metadata['Tags'] = tags + l = [] + for t in time.gmtime(): + l.append(str(t)) + metadata['Batch'] = '_'.join(l) + if self.output_domain: + self.output_domain.put_attributes(metadata['Batch'], {'type' : 'Batch'}) + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + if ignore_dirs: + for ignore in ignore_dirs: + if ignore in dirs: + dirs.remove(ignore) + for file in files: + fullpath = os.path.join(root, file) + if status: + print('Submitting %s' % fullpath) + self.submit_file(fullpath, metadata, cb, num_cb, prefix) + total += 1 + elif os.path.isfile(path): + self.submit_file(path, metadata, cb, num_cb) + total += 1 + else: + print('problem with %s' % path) + return (metadata['Batch'], total) diff --git a/ext/boto/ses/__init__.py b/ext/boto/ses/__init__.py new file mode 100644 index 0000000000..aab03e3dfb --- /dev/null +++ b/ext/boto/ses/__init__.py @@ -0,0 +1,51 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in 
all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ses.connection import SESConnection +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the SES service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return get_regions('ses', connection_cls=SESConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.ses.connection.SESConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.ses.connection.SESConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('ses', region_name, connection_cls=SESConnection, + **kw_params) diff --git a/ext/boto/ses/connection.py b/ext/boto/ses/connection.py new file mode 100644 index 0000000000..244029a07a --- /dev/null +++ b/ext/boto/ses/connection.py @@ -0,0 +1,565 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Harry Marr http://hmarr.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
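The regions() and connect_to_region() helpers above are the whole public surface of boto.ses; everything else hangs off the SESConnection class that this file defines. A minimal sketch of the round trip, assuming boto credentials are already configured and using hypothetical addresses; the source address must complete SES verification before send_email() will succeed:

    import boto.ses

    conn = boto.ses.connect_to_region('us-east-1')

    # One-time setup: sends a confirmation mail to the address.
    conn.verify_email_identity('sender@example.com')

    # Once the address is verified, plain-text mail can go out.
    conn.send_email(
        source='sender@example.com',
        subject='Hello from SES',
        body='This is the message body.',
        to_addresses=['recipient@example.com'],
    )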
+import re +import base64 + +from boto.compat import six, urllib +from boto.connection import AWSAuthConnection +from boto.exception import BotoServerError +from boto.regioninfo import RegionInfo +import boto +import boto.jsonresponse +from boto.ses import exceptions as ses_exceptions + + +class SESConnection(AWSAuthConnection): + + ResponseError = BotoServerError + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'email.us-east-1.amazonaws.com' + APIVersion = '2010-12-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + self.region = region + super(SESConnection, self).__init__(self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _required_auth_capability(self): + return ['ses'] + + def _build_list_params(self, params, items, label): + """Add an AWS API-compatible parameter list to a dictionary. + + :type params: dict + :param params: The parameter dictionary + + :type items: list + :param items: Items to be included in the list + + :type label: string + :param label: The parameter list's name + """ + if isinstance(items, six.string_types): + items = [items] + for i in range(1, len(items) + 1): + params['%s.%d' % (label, i)] = items[i - 1] + + def _make_request(self, action, params=None): + """Make a call to the SES API. + + :type action: string + :param action: The API method to use (e.g. SendRawEmail) + + :type params: dict + :param params: Parameters that will be sent as POST data with the API + call. + """ + ct = 'application/x-www-form-urlencoded; charset=UTF-8' + headers = {'Content-Type': ct} + params = params or {} + params['Action'] = action + + for k, v in params.items(): + if isinstance(v, six.text_type): # UTF-8 encode only if it's Unicode + params[k] = v.encode('utf-8') + + response = super(SESConnection, self).make_request( + 'POST', + '/', + headers=headers, + data=urllib.parse.urlencode(params) + ) + body = response.read().decode('utf-8') + if response.status == 200: + list_markers = ('VerifiedEmailAddresses', 'Identities', + 'DkimTokens', 'DkimAttributes', + 'VerificationAttributes', 'SendDataPoints') + item_markers = ('member', 'item', 'entry') + + e = boto.jsonresponse.Element(list_marker=list_markers, + item_marker=item_markers) + h = boto.jsonresponse.XmlHandler(e, None) + h.parse(body) + return e + else: + # HTTP codes other than 200 are considered errors. Go through + # some error handling to determine which exception gets raised, + self._handle_error(response, body) + + def _handle_error(self, response, body): + """ + Handle raising the correct exception, depending on the error. Many + errors share the same HTTP response code, meaning we have to get really + kludgey and do string searches to figure out what went wrong. + """ + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + + if "Address blacklisted." in body: + # Delivery failures happened frequently enough with the recipient's + # email address for Amazon to blacklist it. 
After a day or three, + # they'll be automatically removed, and delivery can be attempted + # again (if you write the code to do so in your application). + ExceptionToRaise = ses_exceptions.SESAddressBlacklistedError + exc_reason = "Address blacklisted." + elif "Email address is not verified." in body: + # This error happens when the "Reply-To" value passed to + # send_email() hasn't been verified yet. + ExceptionToRaise = ses_exceptions.SESAddressNotVerifiedError + exc_reason = "Email address is not verified." + elif "Daily message quota exceeded." in body: + # Encountered when your account exceeds the maximum total number + # of emails per 24 hours. + ExceptionToRaise = ses_exceptions.SESDailyQuotaExceededError + exc_reason = "Daily message quota exceeded." + elif "Maximum sending rate exceeded." in body: + # Your account has sent above its allowed requests a second rate. + ExceptionToRaise = ses_exceptions.SESMaxSendingRateExceededError + exc_reason = "Maximum sending rate exceeded." + elif "Domain ends with dot." in body: + # Recipient address ends with a dot/period. This is invalid. + ExceptionToRaise = ses_exceptions.SESDomainEndsWithDotError + exc_reason = "Domain ends with dot." + elif "Local address contains control or whitespace" in body: + # I think this pertains to the recipient address. + ExceptionToRaise = ses_exceptions.SESLocalAddressCharacterError + exc_reason = "Local address contains control or whitespace." + elif "Illegal address" in body: + # A clearly mal-formed address. + ExceptionToRaise = ses_exceptions.SESIllegalAddressError + exc_reason = "Illegal address" + # The re.search is to distinguish from the + # SESAddressNotVerifiedError error above. + elif re.search('Identity.*is not verified', body): + ExceptionToRaise = ses_exceptions.SESIdentityNotVerifiedError + exc_reason = "Identity is not verified." + elif "ownership not confirmed" in body: + ExceptionToRaise = ses_exceptions.SESDomainNotConfirmedError + exc_reason = "Domain ownership is not confirmed." + else: + # This is either a common AWS error, or one that we don't devote + # its own exception to. + ExceptionToRaise = self.ResponseError + exc_reason = response.reason + + raise ExceptionToRaise(response.status, exc_reason, body) + + def send_email(self, source, subject, body, to_addresses, + cc_addresses=None, bcc_addresses=None, + format='text', reply_addresses=None, + return_path=None, text_body=None, html_body=None): + """Composes an email message based on input data, and then immediately + queues the message for sending. + + :type source: string + :param source: The sender's email address. + + :type subject: string + :param subject: The subject of the message: A short summary of the + content, which will appear in the recipient's inbox. + + :type body: string + :param body: The message body. + + :type to_addresses: list of strings or string + :param to_addresses: The To: field(s) of the message. + + :type cc_addresses: list of strings or string + :param cc_addresses: The CC: field(s) of the message. + + :type bcc_addresses: list of strings or string + :param bcc_addresses: The BCC: field(s) of the message. + + :type format: string + :param format: The format of the message's body, must be either "text" + or "html". + + :type reply_addresses: list of strings or string + :param reply_addresses: The reply-to email address(es) for the + message. If the recipient replies to the + message, each reply-to address will + receive the reply. 
+ + :type return_path: string + :param return_path: The email address to which bounce notifications are + to be forwarded. If the message cannot be delivered + to the recipient, then an error message will be + returned from the recipient's ISP; this message + will then be forwarded to the email address + specified by the ReturnPath parameter. + + :type text_body: string + :param text_body: The text body to send with this email. + + :type html_body: string + :param html_body: The html body to send with this email. + + """ + format = format.lower().strip() + if body is not None: + if format == "text": + if text_body is not None: + raise Warning("You've passed in both a body and a " + "text_body; please choose one or the other.") + text_body = body + else: + if html_body is not None: + raise Warning("You've passed in both a body and an " + "html_body; please choose one or the other.") + html_body = body + + params = { + 'Source': source, + 'Message.Subject.Data': subject, + } + + if return_path: + params['ReturnPath'] = return_path + + if html_body is not None: + params['Message.Body.Html.Data'] = html_body + if text_body is not None: + params['Message.Body.Text.Data'] = text_body + + if(format not in ("text", "html")): + raise ValueError("'format' argument must be 'text' or 'html'") + + if(not (html_body or text_body)): + raise ValueError("No text or html body found for mail") + + self._build_list_params(params, to_addresses, + 'Destination.ToAddresses.member') + if cc_addresses: + self._build_list_params(params, cc_addresses, + 'Destination.CcAddresses.member') + + if bcc_addresses: + self._build_list_params(params, bcc_addresses, + 'Destination.BccAddresses.member') + + if reply_addresses: + self._build_list_params(params, reply_addresses, + 'ReplyToAddresses.member') + + return self._make_request('SendEmail', params) + + def send_raw_email(self, raw_message, source=None, destinations=None): + """Sends an email message, with header and content specified by the + client. The SendRawEmail action is useful for sending multipart MIME + emails, with attachments or inline content. The raw text of the message + must comply with Internet email standards; otherwise, the message + cannot be sent. + + :type source: string + :param source: The sender's email address. Amazon's docs say: + + If you specify the Source parameter, then bounce notifications and + complaints will be sent to this email address. This takes precedence + over any Return-Path header that you might include in the raw text of + the message. + + :type raw_message: string + :param raw_message: The raw text of the message. The client is + responsible for ensuring the following: + + - Message must contain a header and a body, separated by a blank line. + - All required header fields must be present. + - Each part of a multipart MIME message must be formatted properly. + - MIME content types must be among those supported by Amazon SES. + Refer to the Amazon SES Developer Guide for more details. + - Content must be base64-encoded, if MIME requires it. + + :type destinations: list of strings or string + :param destinations: A list of destinations for the message. 
+ + """ + + if isinstance(raw_message, six.text_type): + raw_message = raw_message.encode('utf-8') + + params = { + 'RawMessage.Data': base64.b64encode(raw_message), + } + + if source: + params['Source'] = source + + if destinations: + self._build_list_params(params, destinations, + 'Destinations.member') + + return self._make_request('SendRawEmail', params) + + def list_verified_email_addresses(self): + """Fetch a list of the email addresses that have been verified. + + :rtype: dict + :returns: A ListVerifiedEmailAddressesResponse structure. Note that + keys must be unicode strings. + """ + return self._make_request('ListVerifiedEmailAddresses') + + def get_send_quota(self): + """Fetches the user's current activity limits. + + :rtype: dict + :returns: A GetSendQuotaResponse structure. Note that keys must be + unicode strings. + """ + return self._make_request('GetSendQuota') + + def get_send_statistics(self): + """Fetches the user's sending statistics. The result is a list of data + points, representing the last two weeks of sending activity. + + Each data point in the list contains statistics for a 15-minute + interval. + + :rtype: dict + :returns: A GetSendStatisticsResponse structure. Note that keys must be + unicode strings. + """ + return self._make_request('GetSendStatistics') + + def delete_verified_email_address(self, email_address): + """Deletes the specified email address from the list of verified + addresses. + + :type email_adddress: string + :param email_address: The email address to be removed from the list of + verified addreses. + + :rtype: dict + :returns: A DeleteVerifiedEmailAddressResponse structure. Note that + keys must be unicode strings. + """ + return self._make_request('DeleteVerifiedEmailAddress', { + 'EmailAddress': email_address, + }) + + def verify_email_address(self, email_address): + """Verifies an email address. This action causes a confirmation email + message to be sent to the specified address. + + :type email_adddress: string + :param email_address: The email address to be verified. + + :rtype: dict + :returns: A VerifyEmailAddressResponse structure. Note that keys must + be unicode strings. + """ + return self._make_request('VerifyEmailAddress', { + 'EmailAddress': email_address, + }) + + def verify_domain_dkim(self, domain): + """ + Returns a set of DNS records, or tokens, that must be published in the + domain name's DNS to complete the DKIM verification process. These + tokens are DNS ``CNAME`` records that point to DKIM public keys hosted + by Amazon SES. To complete the DKIM verification process, these tokens + must be published in the domain's DNS. The tokens must remain + published in order for Easy DKIM signing to function correctly. + + After the tokens are added to the domain's DNS, Amazon SES will be able + to DKIM-sign email originating from that domain. To enable or disable + Easy DKIM signing for a domain, use the ``SetIdentityDkimEnabled`` + action. For more information about Easy DKIM, go to the `Amazon SES + Developer Guide + `_. + + :type domain: string + :param domain: The domain name. + + """ + return self._make_request('VerifyDomainDkim', { + 'Domain': domain, + }) + + def set_identity_dkim_enabled(self, identity, dkim_enabled): + """Enables or disables DKIM signing of email sent from an identity. 
+
+        * If Easy DKIM signing is enabled for a domain name identity (e.g.,
+          ``example.com``),
+          then Amazon SES will DKIM-sign all email sent by addresses under that
+          domain name (e.g., ``user@example.com``)
+        * If Easy DKIM signing is enabled for an email address, then Amazon SES
+          will DKIM-sign all email sent by that email address.
+
+        For email addresses (e.g., ``user@example.com``), you can only enable
+        Easy DKIM signing if the corresponding domain (e.g., ``example.com``)
+        has been set up for Easy DKIM using the AWS Console or the
+        ``VerifyDomainDkim`` action.
+
+        :type identity: string
+        :param identity: An email address or domain name.
+
+        :type dkim_enabled: bool
+        :param dkim_enabled: Specifies whether or not to enable DKIM signing.
+
+        """
+        return self._make_request('SetIdentityDkimEnabled', {
+            'Identity': identity,
+            'DkimEnabled': 'true' if dkim_enabled else 'false'
+        })
+
+    def get_identity_dkim_attributes(self, identities):
+        """Get attributes associated with a list of verified identities.
+
+        Given a list of verified identities (email addresses and/or domains),
+        returns a structure describing identity notification attributes.
+
+        :type identities: list
+        :param identities: A list of verified identities (email addresses
+            and/or domains).
+
+        """
+        params = {}
+        self._build_list_params(params, identities, 'Identities.member')
+        return self._make_request('GetIdentityDkimAttributes', params)
+
+    def list_identities(self):
+        """Returns a list containing all of the identities (email addresses
+        and domains) for a specific AWS Account, regardless of
+        verification status.
+
+        :rtype: dict
+        :returns: A ListIdentitiesResponse structure. Note that
+            keys must be unicode strings.
+        """
+        return self._make_request('ListIdentities')
+
+    def get_identity_verification_attributes(self, identities):
+        """Given a list of identities (email addresses and/or domains),
+        returns the verification status and (for domain identities)
+        the verification token for each identity.
+
+        :type identities: list of strings or string
+        :param identities: List of identities.
+
+        :rtype: dict
+        :returns: A GetIdentityVerificationAttributesResponse structure.
+            Note that keys must be unicode strings.
+        """
+        params = {}
+        self._build_list_params(params, identities,
+                                'Identities.member')
+        return self._make_request('GetIdentityVerificationAttributes', params)
+
+    def verify_domain_identity(self, domain):
+        """Verifies a domain.
+
+        :type domain: string
+        :param domain: The domain to be verified.
+
+        :rtype: dict
+        :returns: A VerifyDomainIdentityResponse structure. Note that
+            keys must be unicode strings.
+        """
+        return self._make_request('VerifyDomainIdentity', {
+            'Domain': domain,
+        })
+
+    def verify_email_identity(self, email_address):
+        """Verifies an email address. This action causes a confirmation
+        email message to be sent to the specified address.
+
+        :type email_address: string
+        :param email_address: The email address to be verified.
+
+        :rtype: dict
+        :returns: A VerifyEmailIdentityResponse structure. Note that keys must
+            be unicode strings.
+        """
+        return self._make_request('VerifyEmailIdentity', {
+            'EmailAddress': email_address,
+        })
+
+    def delete_identity(self, identity):
+        """Deletes the specified identity (email address or domain) from
+        the list of verified identities.
+
+        :type identity: string
+        :param identity: The identity to be deleted.
+
+        :rtype: dict
+        :returns: A DeleteIdentityResponse structure. Note that keys must
+            be unicode strings.
+ """ + return self._make_request('DeleteIdentity', { + 'Identity': identity, + }) + + def set_identity_notification_topic(self, identity, notification_type, sns_topic=None): + """Sets an SNS topic to publish bounce or complaint notifications for + emails sent with the given identity as the Source. Publishing to topics + may only be disabled when feedback forwarding is enabled. + + :type identity: string + :param identity: An email address or domain name. + + :type notification_type: string + :param notification_type: The type of feedback notifications that will + be published to the specified topic. + Valid Values: Bounce | Complaint | Delivery + + :type sns_topic: string or None + :param sns_topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (Amazon SNS) topic. + """ + params = { + 'Identity': identity, + 'NotificationType': notification_type + } + if sns_topic: + params['SnsTopic'] = sns_topic + return self._make_request('SetIdentityNotificationTopic', params) + + def set_identity_feedback_forwarding_enabled(self, identity, forwarding_enabled=True): + """ + Enables or disables SES feedback notification via email. + Feedback forwarding may only be disabled when both complaint and + bounce topics are set. + + :type identity: string + :param identity: An email address or domain name. + + :type forwarding_enabled: bool + :param forwarding_enabled: Specifies whether or not to enable feedback forwarding. + """ + return self._make_request('SetIdentityFeedbackForwardingEnabled', { + 'Identity': identity, + 'ForwardingEnabled': 'true' if forwarding_enabled else 'false' + }) diff --git a/ext/boto/ses/exceptions.py b/ext/boto/ses/exceptions.py new file mode 100644 index 0000000000..d5649f61a6 --- /dev/null +++ b/ext/boto/ses/exceptions.py @@ -0,0 +1,80 @@ +""" +Various exceptions that are specific to the SES module. +""" +from boto.exception import BotoServerError + + +class SESError(BotoServerError): + """ + Sub-class all SES-related errors from here. Don't raise this error + directly from anywhere. The only thing this gets us is the ability to + catch SESErrors separately from the more generic, top-level + BotoServerError exception. + """ + pass + + +class SESAddressNotVerifiedError(SESError): + """ + Raised when a "Reply-To" address has not been validated in SES yet. + """ + pass + + +class SESIdentityNotVerifiedError(SESError): + """ + Raised when an identity (domain or address) has not been verified in SES yet. + """ + pass + + +class SESDomainNotConfirmedError(SESError): + """ + """ + pass + + +class SESAddressBlacklistedError(SESError): + """ + After you attempt to send mail to an address, and delivery repeatedly + fails, said address is blacklisted for at least 24 hours. The blacklisting + eventually expires, and you are able to attempt delivery again. If you + attempt to send mail to a blacklisted email, this is raised. + """ + pass + + +class SESDailyQuotaExceededError(SESError): + """ + Your account's daily (rolling 24 hour total) allotment of outbound emails + has been exceeded. + """ + pass + + +class SESMaxSendingRateExceededError(SESError): + """ + Your account's requests/second limit has been exceeded. + """ + pass + + +class SESDomainEndsWithDotError(SESError): + """ + Recipient's email address' domain ends with a period/dot. + """ + pass + + +class SESLocalAddressCharacterError(SESError): + """ + An address contained a control or whitespace character. 
+ """ + pass + + +class SESIllegalAddressError(SESError): + """ + Raised when an illegal address is encountered. + """ + pass diff --git a/ext/boto/sns/__init__.py b/ext/boto/sns/__init__.py new file mode 100644 index 0000000000..e8818fb08e --- /dev/null +++ b/ext/boto/sns/__init__.py @@ -0,0 +1,53 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# this is here for backward compatibility +# originally, the SNSConnection class was defined here +from boto.sns.connection import SNSConnection +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the SNS service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return get_regions('sns', connection_cls=SNSConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sns.connection.SNSConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sns.connection.SNSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('sns', region_name, + connection_cls=SNSConnection, **kw_params) diff --git a/ext/boto/sns/connection.py b/ext/boto/sns/connection.py new file mode 100644 index 0000000000..8ba7d48183 --- /dev/null +++ b/ext/boto/sns/connection.py @@ -0,0 +1,765 @@ +# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid +import hashlib + +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.compat import json +import boto + + +class SNSConnection(AWSQueryConnection): + """ + Amazon Simple Notification Service + Amazon Simple Notification Service (Amazon SNS) is a web service + that enables you to build distributed web-enabled applications. + Applications can use Amazon SNS to easily push real-time + notification messages to interested subscribers over multiple + delivery protocols. For more information about this product see + `http://aws.amazon.com/sns`_. For detailed information about + Amazon SNS features and their associated API calls, see the + `Amazon SNS Developer Guide`_. + + We also provide SDKs that enable you to access Amazon SNS from + your preferred programming language. The SDKs contain + functionality that automatically takes care of tasks such as: + cryptographically signing your service requests, retrying + requests, and handling error responses. For a list of available + SDKs, go to `Tools for Amazon Web Services`_. + """ + DefaultRegionName = boto.config.get('Boto', 'sns_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'sns_region_endpoint', + 'sns.us-east-1.amazonaws.com') + APIVersion = boto.config.get('Boto', 'sns_version', '2010-03-31') + + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + security_token=None, validate_certs=True, + profile_name=None): + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + connection_cls=SNSConnection) + self.region = region + super(SNSConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + security_token=security_token, + validate_certs=validate_certs, + profile_name=profile_name) + + def _build_dict_as_list_params(self, params, dictionary, name): + """ + Serialize a parameter 'name' which value is a 'dictionary' into a list of parameters. 
+ + See: http://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html + For example:: + + dictionary = {'PlatformPrincipal': 'foo', 'PlatformCredential': 'bar'} + name = 'Attributes' + + would result in params dict being populated with: + Attributes.entry.1.key = PlatformPrincipal + Attributes.entry.1.value = foo + Attributes.entry.2.key = PlatformCredential + Attributes.entry.2.value = bar + + :param params: the resulting parameters will be added to this dict + :param dictionary: dict - value of the serialized parameter + :param name: name of the serialized parameter + """ + items = sorted(dictionary.items(), key=lambda x:x[0]) + for kv, index in zip(items, list(range(1, len(items)+1))): + key, value = kv + prefix = '%s.entry.%s' % (name, index) + params['%s.key' % prefix] = key + params['%s.value' % prefix] = value + + def _required_auth_capability(self): + return ['hmac-v4'] + + def get_all_topics(self, next_token=None): + """ + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListTopics', params) + + def get_topic_attributes(self, topic): + """ + Get attributes of a Topic + + :type topic: string + :param topic: The ARN of the topic. + + """ + params = {'TopicArn': topic} + return self._make_request('GetTopicAttributes', params) + + def set_topic_attributes(self, topic, attr_name, attr_value): + """ + Get attributes of a Topic + + :type topic: string + :param topic: The ARN of the topic. + + :type attr_name: string + :param attr_name: The name of the attribute you want to set. + Only a subset of the topic's attributes are mutable. + Valid values: Policy | DisplayName + + :type attr_value: string + :param attr_value: The new value for the attribute. + + """ + params = {'TopicArn': topic, + 'AttributeName': attr_name, + 'AttributeValue': attr_value} + return self._make_request('SetTopicAttributes', params) + + def add_permission(self, topic, label, account_ids, actions): + """ + Adds a statement to a topic's access control policy, granting + access for the specified AWS accounts to the specified actions. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the new policy statement. + + :type account_ids: list of strings + :param account_ids: The AWS account ids of the users who will be + give access to the specified actions. + + :type actions: list of strings + :param actions: The actions you want to allow for each of the + specified principal(s). + + """ + params = {'TopicArn': topic, + 'Label': label} + self.build_list_params(params, account_ids, 'AWSAccountId.member') + self.build_list_params(params, actions, 'ActionName.member') + return self._make_request('AddPermission', params) + + def remove_permission(self, topic, label): + """ + Removes a statement from a topic's access control policy. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the policy statement + to be removed. + + """ + params = {'TopicArn': topic, + 'Label': label} + return self._make_request('RemovePermission', params) + + def create_topic(self, topic): + """ + Create a new Topic. + + :type topic: string + :param topic: The name of the new topic. 
+ + """ + params = {'Name': topic} + return self._make_request('CreateTopic', params) + + def delete_topic(self, topic): + """ + Delete an existing topic + + :type topic: string + :param topic: The ARN of the topic + + """ + params = {'TopicArn': topic} + return self._make_request('DeleteTopic', params, '/', 'GET') + + def publish(self, topic=None, message=None, subject=None, target_arn=None, + message_structure=None, message_attributes=None): + """ + Sends a message to all of a topic's subscribed endpoints + + :type topic: string + :param topic: The topic you want to publish to. + + :type message: string + :param message: The message you want to send to the topic. + Messages must be UTF-8 encoded strings and + be at most 4KB in size. + + :type message_structure: string + :param message_structure: Optional parameter. If left as ``None``, + plain text will be sent. If set to ``json``, + your message should be a JSON string that + matches the structure described at + http://docs.aws.amazon.com/sns/latest/dg/PublishTopic.html#sns-message-formatting-by-protocol + + :type message_attributes: dict + :param message_attributes: Message attributes to set. Should be + of the form: + + .. code-block:: python + + { + "name1": { + "data_type": "Number", + "string_value": "42" + }, + "name2": { + "data_type": "String", + "string_value": "Bob" + } + } + + :type subject: string + :param subject: Optional parameter to be used as the "Subject" + line of the email notifications. + + :type target_arn: string + :param target_arn: Optional parameter for either TopicArn or + EndpointArn, but not both. + + """ + if message is None: + # To be backwards compatible when message did not have + # a default value and topic and message were required + # args. + raise TypeError("'message' is a required parameter") + params = {'Message': message} + if subject is not None: + params['Subject'] = subject + if topic is not None: + params['TopicArn'] = topic + if target_arn is not None: + params['TargetArn'] = target_arn + if message_structure is not None: + params['MessageStructure'] = message_structure + if message_attributes is not None: + keys = sorted(message_attributes.keys()) + for i, name in enumerate(keys, start=1): + attribute = message_attributes[name] + params['MessageAttributes.entry.{0}.Name'.format(i)] = name + if 'data_type' in attribute: + params['MessageAttributes.entry.{0}.Value.DataType'.format(i)] = \ + attribute['data_type'] + if 'string_value' in attribute: + params['MessageAttributes.entry.{0}.Value.StringValue'.format(i)] = \ + attribute['string_value'] + if 'binary_value' in attribute: + params['MessageAttributes.entry.{0}.Value.BinaryValue'.format(i)] = \ + attribute['binary_value'] + return self._make_request('Publish', params, '/', 'POST') + + def subscribe(self, topic, protocol, endpoint): + """ + Subscribe to a Topic. + + :type topic: string + :param topic: The ARN of the new topic. + + :type protocol: string + :param protocol: The protocol used to communicate with + the subscriber. Current choices are: + email|email-json|http|https|sqs|sms|application + + :type endpoint: string + :param endpoint: The location of the endpoint for + the subscriber. 
+                         * For email, this would be a valid email address
+                         * For email-json, this would be a valid email address
+                         * For http, this would be a URL beginning with http
+                         * For https, this would be a URL beginning with https
+                         * For sqs, this would be the ARN of an SQS Queue
+                         * For sms, this would be a phone number of an
+                           SMS-enabled device
+                         * For application, the endpoint is the EndpointArn
+                           of a mobile app and device.
+        """
+        params = {'TopicArn': topic,
+                  'Protocol': protocol,
+                  'Endpoint': endpoint}
+        return self._make_request('Subscribe', params)
+
+    def subscribe_sqs_queue(self, topic, queue):
+        """
+        Subscribe an SQS queue to a topic.
+
+        This is a convenience method that handles most of the complexity
+        involved in using an SQS queue as an endpoint for an SNS topic. To
+        achieve this the following operations are performed:
+
+        * The correct ARN is constructed for the SQS queue and that ARN is
+          then subscribed to the topic.
+        * A JSON policy document is constructed that grants permission to
+          the SNS topic to send messages to the SQS queue.
+        * This JSON policy is then associated with the SQS queue using
+          the queue's set_attribute method. If the queue already has
+          a policy associated with it, this process will add a Statement to
+          that policy. If no policy exists, a new policy will be created.
+
+        :type topic: string
+        :param topic: The ARN of the new topic.
+
+        :type queue: A boto Queue object
+        :param queue: The queue you wish to subscribe to the SNS Topic.
+        """
+        t = queue.id.split('/')
+        q_arn = queue.arn
+        sid = hashlib.md5((topic + q_arn).encode('utf-8')).hexdigest()
+        sid_exists = False
+        resp = self.subscribe(topic, 'sqs', q_arn)
+        attr = queue.get_attributes('Policy')
+        if 'Policy' in attr:
+            policy = json.loads(attr['Policy'])
+        else:
+            policy = {}
+        if 'Version' not in policy:
+            policy['Version'] = '2008-10-17'
+        if 'Statement' not in policy:
+            policy['Statement'] = []
+        # See if a Statement with the Sid exists already.
+        for s in policy['Statement']:
+            if s['Sid'] == sid:
+                sid_exists = True
+        if not sid_exists:
+            statement = {'Action': 'SQS:SendMessage',
+                         'Effect': 'Allow',
+                         'Principal': {'AWS': '*'},
+                         'Resource': q_arn,
+                         'Sid': sid,
+                         'Condition': {'StringLike': {'aws:SourceArn': topic}}}
+            policy['Statement'].append(statement)
+        queue.set_attribute('Policy', json.dumps(policy))
+        return resp
+
+    def confirm_subscription(self, topic, token,
+                             authenticate_on_unsubscribe=False):
+        """
+        Confirm a pending subscription to a topic.
+
+        :type topic: string
+        :param topic: The ARN of the new topic.
+
+        :type token: string
+        :param token: Short-lived token sent to an endpoint during
+            the Subscribe operation.
+
+        :type authenticate_on_unsubscribe: bool
+        :param authenticate_on_unsubscribe: Optional parameter indicating
+            that you wish to disable
+            unauthenticated unsubscription
+            of the subscription.
+
+        """
+        params = {'TopicArn': topic, 'Token': token}
+        if authenticate_on_unsubscribe:
+            params['AuthenticateOnUnsubscribe'] = 'true'
+        return self._make_request('ConfirmSubscription', params)
+
+    def unsubscribe(self, subscription):
+        """
+        Allows endpoint owner to delete subscription.
+        Confirmation message will be delivered.
+
+        :type subscription: string
+        :param subscription: The ARN of the subscription to be deleted.
+
+        """
+        params = {'SubscriptionArn': subscription}
+        return self._make_request('Unsubscribe', params)
+
+    def get_all_subscriptions(self, next_token=None):
+        """
+        Get list of all subscriptions.
+ + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptions', params) + + def get_all_subscriptions_by_topic(self, topic, next_token=None): + """ + Get list of all subscriptions to a specific topic. + + :type topic: string + :param topic: The ARN of the topic for which you wish to + find subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {'TopicArn': topic} + if next_token: + params['NextToken'] = next_token + return self._make_request('ListSubscriptionsByTopic', params) + + def create_platform_application(self, name=None, platform=None, + attributes=None): + """ + The `CreatePlatformApplication` action creates a platform + application object for one of the supported push notification + services, such as APNS and GCM, to which devices and mobile + apps may register. You must specify PlatformPrincipal and + PlatformCredential attributes when using the + `CreatePlatformApplication` action. The PlatformPrincipal is + received from the notification service. For APNS/APNS_SANDBOX, + PlatformPrincipal is "SSL certificate". For GCM, + PlatformPrincipal is not applicable. For ADM, + PlatformPrincipal is "client id". The PlatformCredential is + also received from the notification service. For + APNS/APNS_SANDBOX, PlatformCredential is "private key". For + GCM, PlatformCredential is "API key". For ADM, + PlatformCredential is "client secret". The + PlatformApplicationArn that is returned when using + `CreatePlatformApplication` is then used as an attribute for + the `CreatePlatformEndpoint` action. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type name: string + :param name: Application names must be made up of only uppercase and + lowercase ASCII letters, numbers, underscores, hyphens, and + periods, and must be between 1 and 256 characters long. + + :type platform: string + :param platform: The following platforms are supported: ADM (Amazon + Device Messaging), APNS (Apple Push Notification Service), + APNS_SANDBOX, and GCM (Google Cloud Messaging). + + :type attributes: map + :param attributes: For a list of attributes, see + `SetPlatformApplicationAttributes`_ + + """ + params = {} + if name is not None: + params['Name'] = name + if platform is not None: + params['Platform'] = platform + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformApplication', + params=params) + + def set_platform_application_attributes(self, + platform_application_arn=None, + attributes=None): + """ + The `SetPlatformApplicationAttributes` action sets the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + SetPlatformApplicationAttributes action. + + :type attributes: map + :param attributes: + A map of the platform application attributes. Attributes in this map + include the following: + + + + `PlatformCredential` -- The credential received from the notification + service. For APNS/APNS_SANDBOX, PlatformCredential is "private + key". For GCM, PlatformCredential is "API key". 
For ADM, + PlatformCredential is "client secret". + + `PlatformPrincipal` -- The principal received from the notification + service. For APNS/APNS_SANDBOX, PlatformPrincipal is "SSL + certificate". For GCM, PlatformPrincipal is not applicable. For + ADM, PlatformPrincipal is "client id". + + `EventEndpointCreated` -- Topic ARN to which EndpointCreated event + notifications should be sent. + + `EventEndpointDeleted` -- Topic ARN to which EndpointDeleted event + notifications should be sent. + + `EventEndpointUpdated` -- Topic ARN to which EndpointUpdate event + notifications should be sent. + + `EventDeliveryFailure` -- Topic ARN to which DeliveryFailure event + notifications should be sent upon Direct Publish delivery failure + (permanent) to one of the application's endpoints. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetPlatformApplicationAttributes', + params=params) + + def get_platform_application_attributes(self, + platform_application_arn=None): + """ + The `GetPlatformApplicationAttributes` action retrieves the + attributes of the platform application object for the + supported push notification services, such as APNS and GCM. + For more information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type platform_application_arn: string + :param platform_application_arn: PlatformApplicationArn for + GetPlatformApplicationAttributesInput. + + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + return self._make_request(action='GetPlatformApplicationAttributes', + params=params) + + def list_platform_applications(self, next_token=None): + """ + The `ListPlatformApplications` action lists the platform + application objects for the supported push notification + services, such as APNS and GCM. The results for + `ListPlatformApplications` are paginated and return a limited + list of applications, up to 100. If additional records are + available after the first page results, then a NextToken + string will be returned. To receive the next page, you call + `ListPlatformApplications` using the NextToken string received + from the previous call. When there are no more records to + return, NextToken will be null. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type next_token: string + :param next_token: NextToken string is used when calling + ListPlatformApplications action to retrieve additional records that + are available after the first page results. + + """ + params = {} + if next_token is not None: + params['NextToken'] = next_token + return self._make_request(action='ListPlatformApplications', + params=params) + + def list_endpoints_by_platform_application(self, + platform_application_arn=None, + next_token=None): + """ + The `ListEndpointsByPlatformApplication` action lists the + endpoints and endpoint attributes for devices in a supported + push notification service, such as GCM and APNS. The results + for `ListEndpointsByPlatformApplication` are paginated and + return a limited list of endpoints, up to 100. If additional + records are available after the first page results, then a + NextToken string will be returned. To receive the next page, + you call `ListEndpointsByPlatformApplication` again using the + NextToken string received from the previous call. 
When there
+        are no more records to return, NextToken will be null. For
+        more information, see `Using Amazon SNS Mobile Push
+        Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn for
+            ListEndpointsByPlatformApplicationInput action.
+
+        :type next_token: string
+        :param next_token: NextToken string is used when calling
+            ListEndpointsByPlatformApplication action to retrieve additional
+            records that are available after the first page results.
+
+        """
+        params = {}
+        if platform_application_arn is not None:
+            params['PlatformApplicationArn'] = platform_application_arn
+        if next_token is not None:
+            params['NextToken'] = next_token
+        return self._make_request(action='ListEndpointsByPlatformApplication',
+                                  params=params)
+
+    def delete_platform_application(self, platform_application_arn=None):
+        """
+        The `DeletePlatformApplication` action deletes a platform
+        application object for one of the supported push notification
+        services, such as APNS and GCM. For more information, see
+        `Using Amazon SNS Mobile Push Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn of platform
+            application object to delete.
+
+        """
+        params = {}
+        if platform_application_arn is not None:
+            params['PlatformApplicationArn'] = platform_application_arn
+        return self._make_request(action='DeletePlatformApplication',
+                                  params=params)
+
+    def create_platform_endpoint(self, platform_application_arn=None,
+                                 token=None, custom_user_data=None,
+                                 attributes=None):
+        """
+        The `CreatePlatformEndpoint` creates an endpoint for a device
+        and mobile app on one of the supported push notification
+        services, such as GCM and APNS. `CreatePlatformEndpoint`
+        requires the PlatformApplicationArn that is returned from
+        `CreatePlatformApplication`. The EndpointArn that is returned
+        when using `CreatePlatformEndpoint` can then be used by the
+        `Publish` action to send a message to a mobile app or by the
+        `Subscribe` action for subscription to a topic. For more
+        information, see `Using Amazon SNS Mobile Push
+        Notifications`_.
+
+        :type platform_application_arn: string
+        :param platform_application_arn: PlatformApplicationArn returned from
+            CreatePlatformApplication is used to create an endpoint.
+
+        :type token: string
+        :param token: Unique identifier created by the notification service for
+            an app on a device. The specific name for Token will vary,
+            depending on which notification service is being used. For example,
+            when using APNS as the notification service, you need the device
+            token. Alternatively, when using GCM or ADM, the device token
+            equivalent is called the registration ID.
+
+        :type custom_user_data: string
+        :param custom_user_data: Arbitrary user data to associate with the
+            endpoint. SNS does not use this data. The data must be in UTF-8
+            format and less than 2KB.
+
+        :type attributes: map
+        :param attributes: For a list of attributes, see
+            `SetEndpointAttributes`_.
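+
+        A minimal illustrative call (the application ARN and device
+        token below are hypothetical placeholders, not values taken
+        from any real application):
+
+        >>> connection.create_platform_endpoint(
+        ...     platform_application_arn='arn:aws:sns:us-east-1:123456789012:app/GCM/ExampleApp',
+        ...     token='example-registration-id',
+        ...     custom_user_data='user-1234')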
+ + """ + params = {} + if platform_application_arn is not None: + params['PlatformApplicationArn'] = platform_application_arn + if token is not None: + params['Token'] = token + if custom_user_data is not None: + params['CustomUserData'] = custom_user_data + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='CreatePlatformEndpoint', + params=params) + + def delete_endpoint(self, endpoint_arn=None): + """ + The `DeleteEndpoint` action, which is idempotent, deletes the + endpoint from SNS. For more information, see `Using Amazon SNS + Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn of endpoint to delete. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='DeleteEndpoint', params=params) + + def set_endpoint_attributes(self, endpoint_arn=None, attributes=None): + """ + The `SetEndpointAttributes` action sets the attributes for an + endpoint for a device on one of the supported push + notification services, such as GCM and APNS. For more + information, see `Using Amazon SNS Mobile Push + Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn used for SetEndpointAttributes action. + + :type attributes: map + :param attributes: + A map of the endpoint attributes. Attributes in this map include the + following: + + + + `CustomUserData` -- arbitrary user data to associate with the + endpoint. SNS does not use this data. The data must be in UTF-8 + format and less than 2KB. + + `Enabled` -- flag that enables/disables delivery to the endpoint. + Message Processor will set this to false when a notification + service indicates to SNS that the endpoint is invalid. Users can + set it back to true, typically after updating Token. + + `Token` -- device token, also referred to as a registration id, for + an app and mobile device. This is returned from the notification + service when an app and mobile device are registered with the + notification service. + + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + if attributes is not None: + self._build_dict_as_list_params(params, attributes, 'Attributes') + return self._make_request(action='SetEndpointAttributes', + params=params) + + def get_endpoint_attributes(self, endpoint_arn=None): + """ + The `GetEndpointAttributes` retrieves the endpoint attributes + for a device on one of the supported push notification + services, such as GCM and APNS. For more information, see + `Using Amazon SNS Mobile Push Notifications`_. + + :type endpoint_arn: string + :param endpoint_arn: EndpointArn for GetEndpointAttributes input. 
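+
+        Illustrative usage (the EndpointArn below is a hypothetical
+        placeholder):
+
+        >>> connection.get_endpoint_attributes(
+        ...     endpoint_arn='arn:aws:sns:us-east-1:123456789012:endpoint/GCM/ExampleApp/0000-0000')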
+ + """ + params = {} + if endpoint_arn is not None: + params['EndpointArn'] = endpoint_arn + return self._make_request(action='GetEndpointAttributes', + params=params) + + def _make_request(self, action, params, path='/', verb='GET'): + params['ContentType'] = 'JSON' + response = self.make_request(action=action, verb=verb, + path=path, params=params) + body = response.read().decode('utf-8') + boto.log.debug(body) + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) diff --git a/ext/boto/sqs/__init__.py b/ext/boto/sqs/__init__.py new file mode 100644 index 0000000000..0614c51e92 --- /dev/null +++ b/ext/boto/sqs/__init__.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.sqs.regioninfo import SQSRegionInfo +from boto.regioninfo import get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the SQS service. + + :rtype: list + :return: A list of :class:`boto.sqs.regioninfo.RegionInfo` + """ + from boto.sqs.connection import SQSConnection + return get_regions( + 'sqs', + region_cls=SQSRegionInfo, + connection_cls=SQSConnection + ) + + +def connect_to_region(region_name, **kw_params): + from boto.sqs.connection import SQSConnection + return connect('sqs', region_name, region_cls=SQSRegionInfo, + connection_cls=SQSConnection, **kw_params) diff --git a/ext/boto/sqs/attributes.py b/ext/boto/sqs/attributes.py new file mode 100644 index 0000000000..26c720416f --- /dev/null +++ b/ext/boto/sqs/attributes.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS Attribute Name/Value set +""" + +class Attributes(dict): + + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Attribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + self.current_value = value + else: + setattr(self, name, value) + + diff --git a/ext/boto/sqs/batchresults.py b/ext/boto/sqs/batchresults.py new file mode 100644 index 0000000000..aa5f86b8be --- /dev/null +++ b/ext/boto/sqs/batchresults.py @@ -0,0 +1,95 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011 Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +A set of results returned by SendMessageBatch. +""" + +class ResultEntry(dict): + """ + The result (successful or unsuccessful) of a single + message within a send_message_batch request. + + In the case of a successful result, this dict-like + object will contain the following items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar message_id: A string containing the SQS ID of the new message. + :ivar message_md5: A string containing the MD5 hash of the message body. + + In the case of an error, this object will contain the following + items: + + :ivar id: A string containing the user-supplied ID of the message. + :ivar sender_fault: A boolean value. + :ivar error_code: A string containing a short description of the error. + :ivar error_message: A string containing a description of the error. 
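+
+    A sketch of how these entries are typically inspected, assuming
+    ``res`` is the :class:`BatchResults` object returned by a batch
+    call (the variable names here are illustrative only):
+
+    >>> for entry in res.results:
+    ...     print(entry['id'], entry['message_id'])
+    >>> for entry in res.errors:
+    ...     print(entry['id'], entry['error_code'], entry['error_message'])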
+ """ + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self['id'] = value + elif name == 'MessageId': + self['message_id'] = value + elif name == 'MD5OfMessageBody': + self['message_md5'] = value + elif name == 'SenderFault': + self['sender_fault'] = value + elif name == 'Code': + self['error_code'] = value + elif name == 'Message': + self['error_message'] = value + +class BatchResults(object): + """ + A container for the results of a send_message_batch request. + + :ivar results: A list of successful results. Each item in the + list will be an instance of :class:`ResultEntry`. + + :ivar errors: A list of unsuccessful results. Each item in the + list will be an instance of :class:`ResultEntry`. + """ + + def __init__(self, parent): + self.parent = parent + self.results = [] + self.errors = [] + + def startElement(self, name, attrs, connection): + if name.endswith('MessageBatchResultEntry'): + entry = ResultEntry() + self.results.append(entry) + return entry + if name == 'BatchResultErrorEntry': + entry = ResultEntry() + self.errors.append(entry) + return entry + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + diff --git a/ext/boto/sqs/bigmessage.py b/ext/boto/sqs/bigmessage.py new file mode 100644 index 0000000000..e47ec045a1 --- /dev/null +++ b/ext/boto/sqs/bigmessage.py @@ -0,0 +1,119 @@ +# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid + +import boto +from boto.sqs.message import RawMessage +from boto.exception import SQSDecodeError + + +class BigMessage(RawMessage): + """ + The BigMessage class provides large payloads (up to 5GB) + by storing the payload itself in S3 and then placing a reference + to the S3 object in the actual SQS message payload. + + To create a BigMessage, you should create a BigMessage object + and pass in a file-like object as the ``body`` param and also + pass in the an S3 URL specifying the bucket in which to store + the message body:: + + import boto.sqs + from boto.sqs.bigmessage import BigMessage + + sqs = boto.sqs.connect_to_region('us-west-2') + queue = sqs.get_queue('myqueue') + fp = open('/path/to/bigmessage/data') + msg = BigMessage(queue, fp, 's3://mybucket') + queue.write(msg) + + Passing in a fully-qualified S3 URL (e.g. 
s3://mybucket/foo) + is interpreted to mean that the body of the message is already + stored in S3 and the that S3 URL is then used directly with no + content uploaded by BigMessage. + """ + + def __init__(self, queue=None, body=None, s3_url=None): + self.s3_url = s3_url + super(BigMessage, self).__init__(queue, body) + + def _get_bucket_key(self, s3_url): + bucket_name = key_name = None + if s3_url: + if s3_url.startswith('s3://'): + # We need to split out the bucket from the key (if + # supplied). We also have to be aware that someone + # may provide a trailing '/' character as in: + # s3://foo/ and we want to handle that. + s3_components = s3_url[5:].split('/', 1) + bucket_name = s3_components[0] + if len(s3_components) > 1: + if s3_components[1]: + key_name = s3_components[1] + else: + msg = 's3_url parameter should start with s3://' + raise SQSDecodeError(msg, self) + return bucket_name, key_name + + def encode(self, value): + """ + :type value: file-like object + :param value: A file-like object containing the content + of the message. The actual content will be stored + in S3 and a link to the S3 object will be stored in + the message body. + """ + bucket_name, key_name = self._get_bucket_key(self.s3_url) + if bucket_name and key_name: + return self.s3_url + key_name = uuid.uuid4() + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.new_key(key_name) + key.set_contents_from_file(value) + self.s3_url = 's3://%s/%s' % (bucket_name, key_name) + return self.s3_url + + def _get_s3_object(self, s3_url): + bucket_name, key_name = self._get_bucket_key(s3_url) + if bucket_name and key_name: + s3_conn = boto.connect_s3() + s3_bucket = s3_conn.get_bucket(bucket_name) + key = s3_bucket.get_key(key_name) + return key + else: + msg = 'Unable to decode S3 URL: %s' % s3_url + raise SQSDecodeError(msg, self) + + def decode(self, value): + self.s3_url = value + key = self._get_s3_object(value) + return key.get_contents_as_string() + + def delete(self): + # Delete the object in S3 first, then delete the SQS message + if self.s3_url: + key = self._get_s3_object(self.s3_url) + key.delete() + super(BigMessage, self).delete() + diff --git a/ext/boto/sqs/connection.py b/ext/boto/sqs/connection.py new file mode 100644 index 0000000000..6a5adf64b8 --- /dev/null +++ b/ext/boto/sqs/connection.py @@ -0,0 +1,596 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
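+#
+# A minimal usage sketch for the connection class defined below
+# (illustrative only; the region and queue name are hypothetical, and
+# credentials are assumed to come from the usual boto configuration):
+#
+#     import boto.sqs
+#     conn = boto.sqs.connect_to_region('us-east-1')
+#     q = conn.create_queue('example-queue', visibility_timeout=60)
+#     conn.send_message(q, 'hello world')
+#     messages = conn.receive_message(q, number_messages=1)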
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.sqs.regioninfo import SQSRegionInfo
+from boto.sqs.queue import Queue
+from boto.sqs.message import Message
+from boto.sqs.attributes import Attributes
+from boto.sqs.batchresults import BatchResults
+from boto.exception import SQSError, BotoServerError
+
+
+class SQSConnection(AWSQueryConnection):
+    """
+    A Connection to the SQS Service.
+    """
+    DefaultRegionName = boto.config.get('Boto', 'sqs_region_name', 'us-east-1')
+    DefaultRegionEndpoint = boto.config.get('Boto', 'sqs_region_endpoint',
+                                            'queue.amazonaws.com')
+    APIVersion = boto.config.get('Boto', 'sqs_version', '2012-11-05')
+    DefaultContentType = 'text/plain'
+    ResponseError = SQSError
+    AuthServiceName = 'sqs'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 security_token=None, validate_certs=True, profile_name=None):
+        if not region:
+            region = SQSRegionInfo(self, self.DefaultRegionName,
+                                   self.DefaultRegionEndpoint)
+        self.region = region
+        super(SQSConnection, self).__init__(aws_access_key_id,
+                                            aws_secret_access_key,
+                                            is_secure, port,
+                                            proxy, proxy_port,
+                                            proxy_user, proxy_pass,
+                                            self.region.endpoint, debug,
+                                            https_connection_factory, path,
+                                            security_token=security_token,
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)
+        self.auth_region_name = self.region.name
+
+    def _required_auth_capability(self):
+        return ['hmac-v4']
+
+    def create_queue(self, queue_name, visibility_timeout=None):
+        """
+        Create an SQS Queue.
+
+        :type queue_name: str or unicode
+        :param queue_name: The name of the new queue. Names are
+            scoped to an account and need to be unique within that
+            account. Calling this method on an existing queue name
+            will not return an error from SQS unless the value for
+            visibility_timeout is different than the value of the
+            existing queue of that name. This is still an expensive
+            operation, though, and not the preferred way to check for
+            the existence of a queue. See the
+            :func:`boto.sqs.connection.SQSConnection.lookup` method.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The default visibility timeout for
+            all messages written in the queue. This can be overridden
+            on a per-message basis.
+
+        :rtype: :class:`boto.sqs.queue.Queue`
+        :return: The newly created queue.
+
+        """
+        params = {'QueueName': queue_name}
+        if visibility_timeout:
+            params['Attribute.1.Name'] = 'VisibilityTimeout'
+            params['Attribute.1.Value'] = int(visibility_timeout)
+        return self.get_object('CreateQueue', params, Queue)
+
+    def delete_queue(self, queue, force_deletion=False):
+        """
+        Delete an SQS Queue.
+
+        :type queue: A Queue object
+        :param queue: The SQS queue to be deleted
+
+        :type force_deletion: Boolean
+        :param force_deletion: A deprecated parameter that is no longer used by
+            SQS's API.
+
+        :rtype: bool
+        :return: True if the command succeeded, False otherwise
+        """
+        return self.get_status('DeleteQueue', None, queue.id)
+
+    def purge_queue(self, queue):
+        """
+        Purge all messages in an SQS Queue.
+ + :type queue: A Queue object + :param queue: The SQS queue to be purged + + :rtype: bool + :return: True if the command succeeded, False otherwise + """ + return self.get_status('PurgeQueue', None, queue.id) + + def get_queue_attributes(self, queue, attribute='All'): + """ + Gets one or all attributes of a Queue + + :type queue: A Queue object + :param queue: The SQS queue to get attributes for + + :type attribute: str + :param attribute: The specific attribute requested. If not + supplied, the default is to return all attributes. Valid + attributes are: + + * All + * ApproximateNumberOfMessages + * ApproximateNumberOfMessagesNotVisible + * VisibilityTimeout + * CreatedTimestamp + * LastModifiedTimestamp + * Policy + * MaximumMessageSize + * MessageRetentionPeriod + * QueueArn + * ApproximateNumberOfMessagesDelayed + * DelaySeconds + * ReceiveMessageWaitTimeSeconds + * RedrivePolicy + + :rtype: :class:`boto.sqs.attributes.Attributes` + :return: An Attributes object containing request value(s). + """ + params = {'AttributeName' : attribute} + return self.get_object('GetQueueAttributes', params, + Attributes, queue.id) + + def set_queue_attribute(self, queue, attribute, value): + """ + Set a new value for an attribute of a Queue. + + :type queue: A Queue object + :param queue: The SQS queue to get attributes for + + :type attribute: String + :param attribute: The name of the attribute you want to set. + + :param value: The new value for the attribute must be: + + * For `DelaySeconds` the value must be an integer number of + seconds from 0 to 900 (15 minutes). + >>> connection.set_queue_attribute(queue, 'DelaySeconds', 900) + + * For `MaximumMessageSize` the value must be an integer number of + bytes from 1024 (1 KiB) to 262144 (256 KiB). + >>> connection.set_queue_attribute(queue, 'MaximumMessageSize', 262144) + + * For `MessageRetentionPeriod` the value must be an integer number of + seconds from 60 (1 minute) to 1209600 (14 days). + >>> connection.set_queue_attribute(queue, 'MessageRetentionPeriod', 1209600) + + * For `Policy` the value must be an string that contains JSON formatted + parameters and values. + >>> connection.set_queue_attribute(queue, 'Policy', json.dumps({ + ... 'Version': '2008-10-17', + ... 'Id': '/123456789012/testQueue/SQSDefaultPolicy', + ... 'Statement': [ + ... { + ... 'Sid': 'Queue1ReceiveMessage', + ... 'Effect': 'Allow', + ... 'Principal': { + ... 'AWS': '*' + ... }, + ... 'Action': 'SQS:ReceiveMessage', + ... 'Resource': 'arn:aws:aws:sqs:us-east-1:123456789012:testQueue' + ... } + ... ] + ... })) + + * For `ReceiveMessageWaitTimeSeconds` the value must be an integer number of + seconds from 0 to 20. + >>> connection.set_queue_attribute(queue, 'ReceiveMessageWaitTimeSeconds', 20) + + * For `VisibilityTimeout` the value must be an integer number of + seconds from 0 to 43200 (12 hours). + >>> connection.set_queue_attribute(queue, 'VisibilityTimeout', 43200) + + * For `RedrivePolicy` the value must be an string that contains JSON formatted + parameters and values. You can set maxReceiveCount to a value between 1 and 1000. + The deadLetterTargetArn value is the Amazon Resource Name (ARN) of the queue that + will receive the dead letter messages. + >>> connection.set_queue_attribute(queue, 'RedrivePolicy', json.dumps({ + ... 'maxReceiveCount': 5, + ... 'deadLetterTargetArn': "arn:aws:aws:sqs:us-east-1:123456789012:testDeadLetterQueue" + ... 
})) + """ + + params = {'Attribute.Name' : attribute, 'Attribute.Value' : value} + return self.get_status('SetQueueAttributes', params, queue.id) + + def receive_message(self, queue, number_messages=1, + visibility_timeout=None, attributes=None, + wait_time_seconds=None, message_attributes=None): + """ + Read messages from an SQS Queue. + + :type queue: A Queue object + :param queue: The Queue from which messages are read. + + :type number_messages: int + :param number_messages: The maximum number of messages to read + (default=1) + + :type visibility_timeout: int + :param visibility_timeout: The number of seconds the message should + remain invisible to other queue readers + (default=None which uses the Queues default) + + :type attributes: str + :param attributes: The name of additional attribute to return + with response or All if you want all attributes. The + default is to return no additional attributes. Valid + values: + * All + * SenderId + * SentTimestamp + * ApproximateReceiveCount + * ApproximateFirstReceiveTimestamp + + :type wait_time_seconds: int + :param wait_time_seconds: The duration (in seconds) for which the call + will wait for a message to arrive in the queue before returning. + If a message is available, the call will return sooner than + wait_time_seconds. + + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. + + :rtype: list + :return: A list of :class:`boto.sqs.message.Message` objects. + + """ + params = {'MaxNumberOfMessages' : number_messages} + if visibility_timeout is not None: + params['VisibilityTimeout'] = visibility_timeout + if attributes is not None: + self.build_list_params(params, attributes, 'AttributeName') + if wait_time_seconds is not None: + params['WaitTimeSeconds'] = wait_time_seconds + if message_attributes is not None: + self.build_list_params(params, message_attributes, + 'MessageAttributeName') + return self.get_list('ReceiveMessage', params, + [('Message', queue.message_class)], + queue.id, queue) + + def delete_message(self, queue, message): + """ + Delete a message from a queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type message: A :class:`boto.sqs.message.Message` object + :param message: The Message to be deleted + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : message.receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def delete_message_batch(self, queue, messages): + """ + Deletes a list of messages from a queue in a single request. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of :class:`boto.sqs.message.Message` objects. + :param messages: A list of message objects. + """ + params = {} + for i, msg in enumerate(messages): + prefix = 'DeleteMessageBatchRequestEntry' + p_name = '%s.%i.Id' % (prefix, (i+1)) + params[p_name] = msg.id + p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1)) + params[p_name] = msg.receipt_handle + return self.get_object('DeleteMessageBatch', params, BatchResults, + queue.id, verb='POST') + + def delete_message_from_handle(self, queue, receipt_handle): + """ + Delete a message from a queue, given a receipt handle. 
+ + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param receipt_handle: The receipt handle for the message + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def send_message(self, queue, message_content, delay_seconds=None, + message_attributes=None): + """ + Send a new message to the queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type message_content: string + :param message_content: The body of the message + + :type delay_seconds: int + :param delay_seconds: Number of seconds (0 - 900) to delay this + message from being processed. + + :type message_attributes: dict + :param message_attributes: Message attributes to set. Should be + of the form: + + { + "name1": { + "data_type": "Number", + "string_value": "1" + }, + "name2": { + "data_type": "String", + "string_value": "Bob" + } + } + + """ + params = {'MessageBody' : message_content} + if delay_seconds: + params['DelaySeconds'] = int(delay_seconds) + + if message_attributes is not None: + keys = sorted(message_attributes.keys()) + for i, name in enumerate(keys, start=1): + attribute = message_attributes[name] + params['MessageAttribute.%s.Name' % i] = name + if 'data_type' in attribute: + params['MessageAttribute.%s.Value.DataType' % i] = \ + attribute['data_type'] + if 'string_value' in attribute: + params['MessageAttribute.%s.Value.StringValue' % i] = \ + attribute['string_value'] + if 'binary_value' in attribute: + params['MessageAttribute.%s.Value.BinaryValue' % i] = \ + attribute['binary_value'] + if 'string_list_value' in attribute: + params['MessageAttribute.%s.Value.StringListValue' % i] = \ + attribute['string_list_value'] + if 'binary_list_value' in attribute: + params['MessageAttribute.%s.Value.BinaryListValue' % i] = \ + attribute['binary_list_value'] + + return self.get_object('SendMessage', params, Message, + queue.id, verb='POST') + + def send_message_batch(self, queue, messages): + """ + Delivers up to 10 messages to a queue in a single request. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of lists. + :param messages: A list of lists or tuples. Each inner + tuple represents a single message to be written + and consists of and ID (string) that must be unique + within the list of messages, the message body itself + which can be a maximum of 64K in length, an + integer which represents the delay time (in seconds) + for the message (0-900) before the message will + be delivered to the queue, and an optional dict of + message attributes like those passed to ``send_message`` + above. 
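+
+        An illustrative batch of three-element tuples (the IDs, bodies,
+        and delay values below are made-up placeholders):
+
+        >>> connection.send_message_batch(queue, [
+        ...     ('m1', 'first body', 0),
+        ...     ('m2', 'second body', 30),
+        ... ])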
+ + """ + params = {} + for i, msg in enumerate(messages): + base = 'SendMessageBatchRequestEntry.%i' % (i + 1) + params['%s.Id' % base] = msg[0] + params['%s.MessageBody' % base] = msg[1] + params['%s.DelaySeconds' % base] = msg[2] + if len(msg) > 3: + base += '.MessageAttribute' + keys = sorted(msg[3].keys()) + for j, name in enumerate(keys): + attribute = msg[3][name] + + p_name = '%s.%i.Name' % (base, j + 1) + params[p_name] = name + + if 'data_type' in attribute: + p_name = '%s.%i.Value.DataType' % (base, j + 1) + params[p_name] = attribute['data_type'] + if 'string_value' in attribute: + p_name = '%s.%i.Value.StringValue' % (base, j + 1) + params[p_name] = attribute['string_value'] + if 'binary_value' in attribute: + p_name = '%s.%i.Value.BinaryValue' % (base, j + 1) + params[p_name] = attribute['binary_value'] + if 'string_list_value' in attribute: + p_name = '%s.%i.Value.StringListValue' % (base, j + 1) + params[p_name] = attribute['string_list_value'] + if 'binary_list_value' in attribute: + p_name = '%s.%i.Value.BinaryListValue' % (base, j + 1) + params[p_name] = attribute['binary_list_value'] + + return self.get_object('SendMessageBatch', params, BatchResults, + queue.id, verb='POST') + + def change_message_visibility(self, queue, receipt_handle, + visibility_timeout): + """ + Extends the read lock timeout for the specified message from + the specified queue to the specified value. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param receipt_handle: The receipt handle associated with the message + whose visibility timeout will be changed. + + :type visibility_timeout: int + :param visibility_timeout: The new value of the message's visibility + timeout in seconds. + """ + params = {'ReceiptHandle' : receipt_handle, + 'VisibilityTimeout' : visibility_timeout} + return self.get_status('ChangeMessageVisibility', params, queue.id) + + def change_message_visibility_batch(self, queue, messages): + """ + A batch version of change_message_visibility that can act + on up to 10 messages at a time. + + :type queue: A :class:`boto.sqs.queue.Queue` object. + :param queue: The Queue to which the messages will be written. + + :type messages: List of tuples. + :param messages: A list of tuples where each tuple consists + of a :class:`boto.sqs.message.Message` object and an integer + that represents the new visibility timeout for that message. + """ + params = {} + for i, t in enumerate(messages): + prefix = 'ChangeMessageVisibilityBatchRequestEntry' + p_name = '%s.%i.Id' % (prefix, (i+1)) + params[p_name] = t[0].id + p_name = '%s.%i.ReceiptHandle' % (prefix, (i+1)) + params[p_name] = t[0].receipt_handle + p_name = '%s.%i.VisibilityTimeout' % (prefix, (i+1)) + params[p_name] = t[1] + return self.get_object('ChangeMessageVisibilityBatch', + params, BatchResults, + queue.id, verb='POST') + + def get_all_queues(self, prefix=''): + """ + Retrieves all queues. + + :keyword str prefix: Optionally, only return queues that start with + this value. + :rtype: list + :returns: A list of :py:class:`boto.sqs.queue.Queue` instances. + """ + params = {} + if prefix: + params['QueueNamePrefix'] = prefix + return self.get_list('ListQueues', params, [('QueueUrl', Queue)]) + + def get_queue(self, queue_name, owner_acct_id=None): + """ + Retrieves the queue with the given name, or ``None`` if no match + was found. + + :param str queue_name: The name of the queue to retrieve. 
+        :param str owner_acct_id: Optionally, the AWS account ID of the
+            account that created the queue.
+        :rtype: :py:class:`boto.sqs.queue.Queue` or ``None``
+        :returns: The requested queue, or ``None`` if no match was found.
+        """
+        params = {'QueueName': queue_name}
+        if owner_acct_id:
+            params['QueueOwnerAWSAccountId'] = owner_acct_id
+        try:
+            return self.get_object('GetQueueUrl', params, Queue)
+        except SQSError:
+            return None
+
+    lookup = get_queue
+
+    def get_dead_letter_source_queues(self, queue):
+        """
+        Retrieves the dead letter source queues for a given queue.
+
+        :type queue: A :class:`boto.sqs.queue.Queue` object.
+        :param queue: The queue for which to get DL source queues
+        :rtype: list
+        :returns: A list of :py:class:`boto.sqs.queue.Queue` instances.
+        """
+        params = {'QueueUrl': queue.url}
+        return self.get_list('ListDeadLetterSourceQueues', params,
+                             [('QueueUrl', Queue)])
+
+    #
+    # Permissions methods
+    #
+
+    def add_permission(self, queue, label, aws_account_id, action_name):
+        """
+        Add a permission to a queue.
+
+        :type queue: :class:`boto.sqs.queue.Queue`
+        :param queue: The queue object
+
+        :type label: str or unicode
+        :param label: A unique identification of the permission you are setting.
+            Maximum of 80 characters ``[0-9a-zA-Z_-]``
+            Example, AliceSendMessage
+
+        :type aws_account_id: str or unicode
+        :param aws_account_id: The AWS account number of the principal
+            who will be given permission. The principal must have an
+            AWS account, but does not need to be signed up for Amazon
+            SQS. For information about locating the AWS account
+            identification, see the Amazon SQS Developer Guide.
+
+        :type action_name: str or unicode
+        :param action_name: The action. Valid choices are:
+            * *
+            * SendMessage
+            * ReceiveMessage
+            * DeleteMessage
+            * ChangeMessageVisibility
+            * GetQueueAttributes
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+
+        """
+        params = {'Label': label,
+                  'AWSAccountId': aws_account_id,
+                  'ActionName': action_name}
+        return self.get_status('AddPermission', params, queue.id)
+
+    def remove_permission(self, queue, label):
+        """
+        Remove a permission from a queue.
+
+        :type queue: :class:`boto.sqs.queue.Queue`
+        :param queue: The queue object
+
+        :type label: str or unicode
+        :param label: The unique label associated with the permission
+            being removed.
+
+        :rtype: bool
+        :return: True if successful, False otherwise.
+        """
+        params = {'Label': label}
+        return self.get_status('RemovePermission', params, queue.id)
diff --git a/ext/boto/sqs/jsonmessage.py b/ext/boto/sqs/jsonmessage.py
new file mode 100644
index 0000000000..520eb8eb06
--- /dev/null
+++ b/ext/boto/sqs/jsonmessage.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import base64
+
+from boto.sqs.message import MHMessage
+from boto.exception import SQSDecodeError
+from boto.compat import json
+
+
+class JSONMessage(MHMessage):
+    """
+    Acts like a dictionary but encodes its data as a Base64 encoded JSON payload.
+    """
+
+    def decode(self, value):
+        try:
+            value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
+            value = json.loads(value)
+        except:
+            raise SQSDecodeError('Unable to decode message', self)
+        return value
+
+    def encode(self, value):
+        value = json.dumps(value)
+        return base64.b64encode(value.encode('utf-8')).decode('utf-8')
diff --git a/ext/boto/sqs/message.py b/ext/boto/sqs/message.py
new file mode 100644
index 0000000000..656734fa67
--- /dev/null
+++ b/ext/boto/sqs/message.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+SQS Message
+
+A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS
+Message are here:
+
+    http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
+
+So, at its simplest level a Message just needs to allow a developer to store bytes in it and get the bytes
+back out. However, to allow messages to have richer semantics, the Message class must support the
+following interfaces:
+
+The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
+boto Queue object and represents the queue that the message will be stored in. The default value for
+this parameter is None.
+
+The constructor for the Message class must accept a keyword parameter "body" which represents the
+content or body of the message. The format of this parameter will depend on the behavior of the
+particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the
+user the body passed to the constructor should be a dict-like object that can be used to populate
+the initial state of the message.
+
+The Message class must provide an encode method that accepts a value of the same type as the body
+parameter of the constructor and returns a string of characters that are able to be stored in an
+SQS message body (see rules above).
+
+The Message class must provide a decode method that accepts a string of characters that can be
+stored (and probably were stored!) in an SQS message and return an object of a type that is consistent
+with the "body" parameter accepted on the class constructor.
+
+The Message class must provide a __len__ method that will return the size of the encoded message
+that would be stored in SQS based on the current state of the Message object.
+
+The Message class must provide a get_body method that will return the body of the message in the
+same format accepted in the constructor of the class.
+
+The Message class must provide a set_body method that accepts a message body in the same format
+accepted by the constructor of the class. This method should alter the internal state of the
+Message object to reflect the state represented in the message body parameter.
+
+The Message class must provide a get_body_encoded method that returns the current body of the message
+in the format in which it would be stored in SQS.
+"""
+
+import base64
+
+import boto
+
+from boto.compat import StringIO
+from boto.compat import six
+from boto.sqs.attributes import Attributes
+from boto.sqs.messageattributes import MessageAttributes
+from boto.exception import SQSDecodeError
+
+class RawMessage(object):
+    """
+    Base class for SQS messages. RawMessage does not encode the message
+    in any way. Whatever you store in the body of the message is what
+    will be written to SQS and whatever is returned from SQS is stored
+    directly into the body of the message.
+    """
+
+    def __init__(self, queue=None, body=''):
+        self.queue = queue
+        self.set_body(body)
+        self.id = None
+        self.receipt_handle = None
+        self.md5 = None
+        self.attributes = Attributes(self)
+        self.message_attributes = MessageAttributes(self)
+        self.md5_message_attributes = None
+
+    def __len__(self):
+        return len(self.encode(self._body))
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Attribute':
+            return self.attributes
+        if name == 'MessageAttribute':
+            return self.message_attributes
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Body':
+            self.set_body(value)
+        elif name == 'MessageId':
+            self.id = value
+        elif name == 'ReceiptHandle':
+            self.receipt_handle = value
+        elif name == 'MD5OfBody':
+            self.md5 = value
+        elif name == 'MD5OfMessageAttributes':
+            self.md5_message_attributes = value
+        else:
+            setattr(self, name, value)
+
+    def endNode(self, connection):
+        self.set_body(self.decode(self.get_body()))
+
+    def encode(self, value):
+        """Transform body object into serialized byte array format."""
+        return value
+
+    def decode(self, value):
+        """Transform serialized byte array into any object."""
+        return value
+
+    def set_body(self, body):
+        """Override the current body for this object, using decoded format."""
+        self._body = body
+
+    def get_body(self):
+        return self._body
+
+    def get_body_encoded(self):
+        """
+        This method is really a semi-private method used by the Queue.write
+        method when writing the contents of the message to SQS.
+        You probably shouldn't need to call this method in the normal course of events.
+        """
+        return self.encode(self.get_body())
+
+    def delete(self):
+        if self.queue:
+            return self.queue.delete_message(self)
+
+    def change_visibility(self, visibility_timeout):
+        if self.queue:
+            self.queue.connection.change_message_visibility(self.queue,
+                                                            self.receipt_handle,
+                                                            visibility_timeout)
+
+class Message(RawMessage):
+    """
+    The default Message class used for SQS queues.
This class automatically + encodes/decodes the message body using Base64 encoding to avoid any + illegal characters in the message body. See: + + https://forums.aws.amazon.com/thread.jspa?threadID=13067 + + for details on why this is a good idea. The encode/decode is meant to + be transparent to the end-user. + """ + + def encode(self, value): + if not isinstance(value, six.binary_type): + value = value.encode('utf-8') + return base64.b64encode(value).decode('utf-8') + + def decode(self, value): + try: + value = base64.b64decode(value.encode('utf-8')).decode('utf-8') + except: + boto.log.warning('Unable to decode message') + return value + return value + +class MHMessage(Message): + """ + The MHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + The encoding/decoding of this is handled automatically and after + the message body has been read, the message instance can be treated + like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'. + """ + + def __init__(self, queue=None, body=None, xml_attrs=None): + if body is None or body == '': + body = {} + super(MHMessage, self).__init__(queue, body) + + def decode(self, value): + try: + msg = {} + fp = StringIO(value) + line = fp.readline() + while line: + delim = line.find(':') + key = line[0:delim] + value = line[delim+1:].strip() + msg[key.strip()] = value.strip() + line = fp.readline() + except: + raise SQSDecodeError('Unable to decode message', self) + return msg + + def encode(self, value): + s = '' + for item in value.items(): + s = s + '%s: %s\n' % (item[0], item[1]) + return s + + def __contains__(self, key): + return key in self._body + + def __getitem__(self, key): + if key in self._body: + return self._body[key] + else: + raise KeyError(key) + + def __setitem__(self, key, value): + self._body[key] = value + self.set_body(self._body) + + def keys(self): + return self._body.keys() + + def values(self): + return self._body.values() + + def items(self): + return self._body.items() + + def has_key(self, key): + return key in self._body + + def update(self, d): + self._body.update(d) + self.set_body(self._body) + + def get(self, key, default=None): + return self._body.get(key, default) + +class EncodedMHMessage(MHMessage): + """ + The EncodedMHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + This variation encodes/decodes the body of the message in base64 automatically. + The message instance can be treated like a mapping object, + i.e. m['HeaderName'] would return 'HeaderValue'. + """ + + def decode(self, value): + try: + value = base64.b64decode(value.encode('utf-8')).decode('utf-8') + except: + raise SQSDecodeError('Unable to decode message', self) + return super(EncodedMHMessage, self).decode(value) + + def encode(self, value): + value = super(EncodedMHMessage, self).encode(value) + return base64.b64encode(value.encode('utf-8')).decode('utf-8') + diff --git a/ext/boto/sqs/messageattributes.py b/ext/boto/sqs/messageattributes.py new file mode 100644 index 0000000000..7e61bf3668 --- /dev/null +++ b/ext/boto/sqs/messageattributes.py @@ -0,0 +1,66 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2014 Amazon.com, Inc. All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS MessageAttribute Name/Value set +""" + +class MessageAttributes(dict): + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + if name == 'Value': + self.current_value = MessageAttributeValue(self) + return self.current_value + + def endElement(self, name, value, connection): + if name == 'MessageAttribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + pass + else: + setattr(self, name, value) + + +class MessageAttributeValue(dict): + def __init__(self, parent): + self.parent = parent + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'DataType': + self['data_type'] = value + elif name == 'StringValue': + self['string_value'] = value + elif name == 'BinaryValue': + self['binary_value'] = value + elif name == 'StringListValue': + self['string_list_value'] = value + elif name == 'BinaryListValue': + self['binary_list_value'] = value diff --git a/ext/boto/sqs/queue.py b/ext/boto/sqs/queue.py new file mode 100644 index 0000000000..c81ed76e3e --- /dev/null +++ b/ext/boto/sqs/queue.py @@ -0,0 +1,541 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
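+#
+# A brief usage sketch for the Queue class defined below (illustrative
+# only; ``conn`` stands for an existing SQSConnection and the queue
+# name is hypothetical):
+#
+#     q = conn.get_queue('example-queue')
+#     q.write(q.new_message('hello'))
+#     m = q.read(visibility_timeout=30)
+#     if m is not None:
+#         q.delete_message(m)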
+ +""" +Represents an SQS Queue +""" +from boto.compat import urllib +from boto.sqs.message import Message + + +class Queue(object): + + def __init__(self, connection=None, url=None, message_class=Message): + self.connection = connection + self.url = url + self.message_class = message_class + self.visibility_timeout = None + + def __repr__(self): + return 'Queue(%s)' % self.url + + def _id(self): + if self.url: + val = urllib.parse.urlparse(self.url)[2] + else: + val = self.url + return val + id = property(_id) + + def _name(self): + if self.url: + val = urllib.parse.urlparse(self.url)[2].split('/')[2] + else: + val = self.url + return val + name = property(_name) + + def _arn(self): + parts = self.id.split('/') + if self.connection.region.name == 'cn-north-1': + partition = 'aws-cn' + else: + partition = 'aws' + return 'arn:%s:sqs:%s:%s:%s' % ( + partition, self.connection.region.name, parts[1], parts[2]) + arn = property(_arn) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'QueueUrl': + self.url = value + elif name == 'VisibilityTimeout': + self.visibility_timeout = int(value) + else: + setattr(self, name, value) + + def set_message_class(self, message_class): + """ + Set the message class that should be used when instantiating + messages read from the queue. By default, the class + :class:`boto.sqs.message.Message` is used but this can be overriden + with any class that behaves like a message. + + :type message_class: Message-like class + :param message_class: The new Message class + """ + self.message_class = message_class + + def get_attributes(self, attributes='All'): + """ + Retrieves attributes about this queue object and returns + them in an Attribute instance (subclass of a Dictionary). + + :type attributes: string + :param attributes: String containing one of: + ApproximateNumberOfMessages, + ApproximateNumberOfMessagesNotVisible, + VisibilityTimeout, + CreatedTimestamp, + LastModifiedTimestamp, + Policy + ReceiveMessageWaitTimeSeconds + :rtype: Attribute object + :return: An Attribute object which is a mapping type holding the + requested name/value pairs + """ + return self.connection.get_queue_attributes(self, attributes) + + def set_attribute(self, attribute, value): + """ + Set a new value for an attribute of the Queue. + + :type attribute: String + :param attribute: The name of the attribute you want to set. + + :param value: The new value for the attribute must be: + + + * For `DelaySeconds` the value must be an integer number of + seconds from 0 to 900 (15 minutes). + >>> queue.set_attribute('DelaySeconds', 900) + + * For `MaximumMessageSize` the value must be an integer number of + bytes from 1024 (1 KiB) to 262144 (256 KiB). + >>> queue.set_attribute('MaximumMessageSize', 262144) + + * For `MessageRetentionPeriod` the value must be an integer number of + seconds from 60 (1 minute) to 1209600 (14 days). + >>> queue.set_attribute('MessageRetentionPeriod', 1209600) + + * For `Policy` the value must be an string that contains JSON formatted + parameters and values. + >>> queue.set_attribute('Policy', json.dumps({ + ... 'Version': '2008-10-17', + ... 'Id': '/123456789012/testQueue/SQSDefaultPolicy', + ... 'Statement': [ + ... { + ... 'Sid': 'Queue1ReceiveMessage', + ... 'Effect': 'Allow', + ... 'Principal': { + ... 'AWS': '*' + ... }, + ... 'Action': 'SQS:ReceiveMessage', + ... 'Resource': 'arn:aws:aws:sqs:us-east-1:123456789012:testQueue' + ... } + ... ] + ... 
})) + + * For `ReceiveMessageWaitTimeSeconds` the value must be an integer number of + seconds from 0 to 20. + >>> queue.set_attribute('ReceiveMessageWaitTimeSeconds', 20) + + * For `VisibilityTimeout` the value must be an integer number of + seconds from 0 to 43200 (12 hours). + >>> queue.set_attribute('VisibilityTimeout', 43200) + + * For `RedrivePolicy` the value must be an string that contains JSON formatted + parameters and values. You can set maxReceiveCount to a value between 1 and 1000. + The deadLetterTargetArn value is the Amazon Resource Name (ARN) of the queue that + will receive the dead letter messages. + >>> queue.set_attribute('RedrivePolicy', json.dumps({ + ... 'maxReceiveCount': 5, + ... 'deadLetterTargetArn': "arn:aws:aws:sqs:us-east-1:123456789012:testDeadLetterQueue" + ... })) + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.set_queue_attribute(self, attribute, value) + + def get_timeout(self): + """ + Get the visibility timeout for the queue. + + :rtype: int + :return: The number of seconds as an integer. + """ + a = self.get_attributes('VisibilityTimeout') + return int(a['VisibilityTimeout']) + + def set_timeout(self, visibility_timeout): + """ + Set the visibility timeout for the queue. + + :type visibility_timeout: int + :param visibility_timeout: The desired timeout in seconds + """ + retval = self.set_attribute('VisibilityTimeout', visibility_timeout) + if retval: + self.visibility_timeout = visibility_timeout + return retval + + def add_permission(self, label, aws_account_id, action_name): + """ + Add a permission to a queue. + + :type label: str or unicode + :param label: A unique identification of the permission you are setting. + Maximum of 80 characters ``[0-9a-zA-Z_-]`` + Example, AliceSendMessage + + :type aws_account_id: str or unicode + :param principal_id: The AWS account number of the principal who + will be given permission. The principal must have an AWS account, + but does not need to be signed up for Amazon SQS. For information + about locating the AWS account identification. + + :type action_name: str or unicode + :param action_name: The action. Valid choices are: + SendMessage|ReceiveMessage|DeleteMessage| + ChangeMessageVisibility|GetQueueAttributes|* + + :rtype: bool + :return: True if successful, False otherwise. + + """ + return self.connection.add_permission(self, label, aws_account_id, + action_name) + + def remove_permission(self, label): + """ + Remove a permission from a queue. + + :type label: str or unicode + :param label: The unique label associated with the permission + being removed. + + :rtype: bool + :return: True if successful, False otherwise. + """ + return self.connection.remove_permission(self, label) + + def read(self, visibility_timeout=None, wait_time_seconds=None, + message_attributes=None): + """ + Read a single message from the queue. + + :type visibility_timeout: int + :param visibility_timeout: The timeout for this message in seconds + + :type wait_time_seconds: int + :param wait_time_seconds: The duration (in seconds) for which the call + will wait for a message to arrive in the queue before returning. + If a message is available, the call will return sooner than + wait_time_seconds. + + :type message_attributes: list + :param message_attributes: The name(s) of additional message + attributes to return. The default is to return no additional + message attributes. Use ``['All']`` or ``['.*']`` to return all. 
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: A single message or None if queue is empty
+        """
+        rs = self.get_messages(1, visibility_timeout,
+                               wait_time_seconds=wait_time_seconds,
+                               message_attributes=message_attributes)
+        if len(rs) == 1:
+            return rs[0]
+        else:
+            return None
+
+    def write(self, message, delay_seconds=None):
+        """
+        Add a single message to the queue.
+
+        :type message: Message
+        :param message: The message to be written to the queue
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: The :class:`boto.sqs.message.Message` object that was written.
+        """
+        new_msg = self.connection.send_message(self,
+            message.get_body_encoded(), delay_seconds=delay_seconds,
+            message_attributes=message.message_attributes)
+        message.id = new_msg.id
+        message.md5 = new_msg.md5
+        return message
+
+    def write_batch(self, messages):
+        """
+        Delivers up to 10 messages in a single request.
+
+        :type messages: List of lists.
+        :param messages: A list of lists or tuples. Each inner
+            tuple represents a single message to be written
+            and consists of an ID (string) that must be unique
+            within the list of messages, the message body itself
+            which can be a maximum of 64K in length, an
+            integer which represents the delay time (in seconds)
+            for the message (0-900) before the message will
+            be delivered to the queue, and an optional dict of
+            message attributes like those passed to ``send_message``
+            in the connection class.
+        """
+        return self.connection.send_message_batch(self, messages)
+
+    def new_message(self, body='', **kwargs):
+        """
+        Create new message of appropriate class.
+
+        :type body: str
+        :param body: The body of the newly created message (optional).
+
+        :rtype: :class:`boto.sqs.message.Message`
+        :return: A new Message object
+        """
+        m = self.message_class(self, body, **kwargs)
+        m.queue = self
+        return m
+
+    # get a variable number of messages, returns a list of messages
+    def get_messages(self, num_messages=1, visibility_timeout=None,
+                     attributes=None, wait_time_seconds=None,
+                     message_attributes=None):
+        """
+        Get a variable number of messages.
+
+        :type num_messages: int
+        :param num_messages: The maximum number of messages to read from
+            the queue.
+
+        :type visibility_timeout: int
+        :param visibility_timeout: The VisibilityTimeout for the messages read.
+
+        :type attributes: str
+        :param attributes: The name of additional attribute to return
+            with response or All if you want all attributes. The
+            default is to return no additional attributes. Valid
+            values: All SenderId SentTimestamp ApproximateReceiveCount
+            ApproximateFirstReceiveTimestamp
+
+        :type wait_time_seconds: int
+        :param wait_time_seconds: The duration (in seconds) for which the call
+            will wait for a message to arrive in the queue before returning.
+            If a message is available, the call will return sooner than
+            wait_time_seconds.
+
+        :type message_attributes: list
+        :param message_attributes: The name(s) of additional message
+            attributes to return. The default is to return no additional
+            message attributes. Use ``['All']`` or ``['.*']`` to return all.
+
+        :rtype: list
+        :return: A list of :class:`boto.sqs.message.Message` objects.
+        """
+        return self.connection.receive_message(
+            self, number_messages=num_messages,
+            visibility_timeout=visibility_timeout, attributes=attributes,
+            wait_time_seconds=wait_time_seconds,
+            message_attributes=message_attributes)
+
+    def delete_message(self, message):
+        """
+        Delete a message from the queue.
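+
+        A minimal usage sketch (assumes the queue currently holds at least
+        one message)::
+
+            >>> m = queue.read()
+            >>> if m is not None:
+            ...     queue.delete_message(m)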
+
+        :type message: :class:`boto.sqs.message.Message`
+        :param message: The :class:`boto.sqs.message.Message` object to delete.
+
+        :rtype: bool
+        :return: True if successful, False otherwise
+        """
+        return self.connection.delete_message(self, message)
+
+    def delete_message_batch(self, messages):
+        """
+        Deletes a list of messages in a single request.
+
+        :type messages: List of :class:`boto.sqs.message.Message` objects.
+        :param messages: A list of message objects.
+        """
+        return self.connection.delete_message_batch(self, messages)
+
+    def change_message_visibility_batch(self, messages):
+        """
+        A batch version of change_message_visibility that can act
+        on up to 10 messages at a time.
+
+        :type messages: List of tuples.
+        :param messages: A list of tuples where each tuple consists
+            of a :class:`boto.sqs.message.Message` object and an integer
+            that represents the new visibility timeout for that message.
+        """
+        return self.connection.change_message_visibility_batch(self, messages)
+
+    def delete(self):
+        """
+        Delete the queue.
+        """
+        return self.connection.delete_queue(self)
+
+    def purge(self):
+        """
+        Purge all messages in the queue.
+        """
+        return self.connection.purge_queue(self)
+
+    def clear(self, page_size=10, vtimeout=10):
+        """Deprecated utility function to remove all messages from a queue"""
+        return self.purge()
+
+    def count(self, page_size=10, vtimeout=10):
+        """
+        Utility function to count the number of messages in a queue.
+        Note: This function now calls GetQueueAttributes to obtain
+        an 'approximate' count of the number of messages in a queue.
+        """
+        a = self.get_attributes('ApproximateNumberOfMessages')
+        return int(a['ApproximateNumberOfMessages'])
+
+    def count_slow(self, page_size=10, vtimeout=10):
+        """
+        Deprecated. This is the old 'count' method that actually counts
+        the messages by reading them all. This gives an accurate count but
+        is very slow for queues with a non-trivial number of messages.
+        Instead, use get_attributes('ApproximateNumberOfMessages') to take
+        advantage of the new SQS capability. This is retained only for
+        the unit tests.
+        """
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        return n
+
+    def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
+        """Utility function to dump the messages in a queue to a file
+        NOTE: Page size must be <= 10 else SQS errors"""
+        fp = open(file_name, 'wb')
+        n = 0
+        l = self.get_messages(page_size, vtimeout)
+        while l:
+            for m in l:
+                fp.write(m.get_body())
+                if sep:
+                    fp.write(sep)
+                n += 1
+            l = self.get_messages(page_size, vtimeout)
+        fp.close()
+        return n
+
+    def save_to_file(self, fp, sep='\n'):
+        """
+        Read all messages from the queue and persist them to file-like object.
+        Messages are written to the file and the 'sep' string is written
+        in between messages. Messages are deleted from the queue after
+        being written to the file.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            fp.write(m.get_body())
+            if sep:
+                fp.write(sep)
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def save_to_filename(self, file_name, sep='\n'):
+        """
+        Read all messages from the queue and persist them to local file.
+        Messages are written to the file and the 'sep' string is written
+        in between messages. Messages are deleted from the queue after
+        being written to the file.
+        Returns the number of messages saved.
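+
+        A minimal usage sketch (the path is hypothetical)::
+
+            >>> queue.save_to_filename('/tmp/queue-backup.txt')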
+        """
+        fp = open(file_name, 'wb')
+        n = self.save_to_file(fp, sep)
+        fp.close()
+        return n
+
+    # for backwards compatibility
+    save = save_to_filename
+
+    def save_to_s3(self, bucket):
+        """
+        Read all messages from the queue and persist them to S3.
+        Messages are stored in the S3 bucket using a naming scheme of::
+
+            <queue_id>/<message_id>
+
+        Messages are deleted from the queue after being saved to S3.
+        Returns the number of messages saved.
+        """
+        n = 0
+        m = self.read()
+        while m:
+            n += 1
+            key = bucket.new_key('%s/%s' % (self.id, m.id))
+            key.set_contents_from_string(m.get_body())
+            self.delete_message(m)
+            m = self.read()
+        return n
+
+    def load_from_s3(self, bucket, prefix=None):
+        """
+        Load messages previously saved to S3.
+        """
+        n = 0
+        if prefix:
+            prefix = '%s/' % prefix
+        else:
+            prefix = '%s/' % self.id[1:]
+        rs = bucket.list(prefix=prefix)
+        for key in rs:
+            n += 1
+            m = self.new_message(key.get_contents_as_string())
+            self.write(m)
+        return n
+
+    def load_from_file(self, fp, sep='\n'):
+        """Utility function to load messages from a file-like object to a queue"""
+        n = 0
+        body = ''
+        l = fp.readline()
+        while l:
+            if l == sep:
+                m = Message(self, body)
+                self.write(m)
+                n += 1
+                print('writing message %d' % n)
+                body = ''
+            else:
+                body = body + l
+            l = fp.readline()
+        return n
+
+    def load_from_filename(self, file_name, sep='\n'):
+        """Utility function to load messages from a local filename to a queue"""
+        fp = open(file_name, 'rb')
+        n = self.load_from_file(fp, sep)
+        fp.close()
+        return n
+
+    # for backward compatibility
+    load = load_from_filename
+
diff --git a/ext/boto/sqs/regioninfo.py b/ext/boto/sqs/regioninfo.py
new file mode 100644
index 0000000000..d21dff9cda
--- /dev/null
+++ b/ext/boto/sqs/regioninfo.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+class SQSRegionInfo(RegionInfo):
+
+    def __init__(self, connection=None, name=None, endpoint=None,
+                 connection_cls=None):
+        from boto.sqs.connection import SQSConnection
+        super(SQSRegionInfo, self).__init__(connection, name, endpoint,
+                                            SQSConnection)
diff --git a/ext/boto/storage_uri.py b/ext/boto/storage_uri.py
new file mode 100644
index 0000000000..128b2ca4fc
--- /dev/null
+++ b/ext/boto/storage_uri.py
@@ -0,0 +1,905 @@
+# Copyright 2010 Google Inc.
+# Copyright (c) 2011, Nexenta Systems Inc.
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +import os +import sys +import textwrap +from boto.s3.deletemarker import DeleteMarker +from boto.exception import BotoClientError +from boto.exception import InvalidUriError + + +class StorageUri(object): + """ + Base class for representing storage provider-independent bucket and + object name with a shorthand URI-like syntax. + + This is an abstract class: the constructor cannot be called (throws an + exception if you try). + """ + + connection = None + # Optional args that can be set from one of the concrete subclass + # constructors, to change connection behavior (e.g., to override + # https_connection_factory). + connection_args = None + + # Map of provider scheme ('s3' or 'gs') to AWSAuthConnection object. We + # maintain a pool here in addition to the connection pool implemented + # in AWSAuthConnection because the latter re-creates its connection pool + # every time that class is instantiated (so the current pool is used to + # avoid re-instantiating AWSAuthConnection). + provider_pool = {} + + def __init__(self): + """Uncallable constructor on abstract base StorageUri class. + """ + raise BotoClientError('Attempt to instantiate abstract StorageUri ' + 'class') + + def __repr__(self): + """Returns string representation of URI.""" + return self.uri + + def equals(self, uri): + """Returns true if two URIs are equal.""" + return self.uri == uri.uri + + def check_response(self, resp, level, uri): + if resp is None: + raise InvalidUriError('\n'.join(textwrap.wrap( + 'Attempt to get %s for "%s" failed. 
This can happen if '
+            'the URI refers to a non-existent object or if you meant to '
+            'operate on a directory (e.g., leaving off -R option on gsutil '
+            'cp, mv, or ls of a bucket)' % (level, uri), 80)))
+
+    def _check_bucket_uri(self, function_name):
+        if issubclass(type(self), BucketStorageUri) and not self.bucket_name:
+            raise InvalidUriError(
+                '%s on bucket-less URI (%s)' % (function_name, self.uri))
+
+    def _check_object_uri(self, function_name):
+        if issubclass(type(self), BucketStorageUri) and not self.object_name:
+            raise InvalidUriError('%s on object-less URI (%s)' %
+                                  (function_name, self.uri))
+
+    def _warn_about_args(self, function_name, **args):
+        for arg in args:
+            if args[arg]:
+                sys.stderr.write(
+                    'Warning: %s ignores argument: %s=%s\n' %
+                    (function_name, arg, str(args[arg])))
+
+    def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
+        """
+        Opens a connection to appropriate provider, depending on provider
+        portion of URI. Requires Credentials defined in boto config file (see
+        boto/pyami/config.py).
+        @type access_key_id: string
+        @param access_key_id: Optional access key id to use for this
+            connection, overriding credentials found in the boto config file.
+        @type secret_access_key: string
+        @param secret_access_key: Optional secret access key to use together
+            with access_key_id.
+        @rtype: L{AWSAuthConnection}
+        @return: A connection to storage service provider of the given URI.
+        """
+        connection_args = dict(self.connection_args or ())
+
+        if (hasattr(self, 'suppress_consec_slashes') and
+                'suppress_consec_slashes' not in connection_args):
+            connection_args['suppress_consec_slashes'] = (
+                self.suppress_consec_slashes)
+        connection_args.update(kwargs)
+        if not self.connection:
+            if self.scheme in self.provider_pool:
+                self.connection = self.provider_pool[self.scheme]
+            elif self.scheme == 's3':
+                from boto.s3.connection import S3Connection
+                self.connection = S3Connection(access_key_id,
+                                               secret_access_key,
+                                               **connection_args)
+                self.provider_pool[self.scheme] = self.connection
+            elif self.scheme == 'gs':
+                from boto.gs.connection import GSConnection
+                # Use OrdinaryCallingFormat instead of boto-default
+                # SubdomainCallingFormat because the latter changes the hostname
+                # that's checked during cert validation for HTTPS connections,
+                # which will fail cert validation (when cert validation is
+                # enabled).
+                #
+                # The same is not true for S3's HTTPS certificates. In fact,
+                # we don't want to do this for S3 because S3 requires the
+                # subdomain to match the location of the bucket. If the proper
+                # subdomain is not used, the server will return a 301 redirect
+                # with no Location header.
+                #
+                # Note: the following import can't be moved up to the
+                # start of this file else it causes a config import failure when
+                # run from the resumable upload/download tests.
+ from boto.s3.connection import OrdinaryCallingFormat + connection_args['calling_format'] = OrdinaryCallingFormat() + self.connection = GSConnection(access_key_id, + secret_access_key, + **connection_args) + self.provider_pool[self.scheme] = self.connection + elif self.scheme == 'file': + from boto.file.connection import FileConnection + self.connection = FileConnection(self) + else: + raise InvalidUriError('Unrecognized scheme "%s"' % + self.scheme) + self.connection.debug = self.debug + return self.connection + + def has_version(self): + return (issubclass(type(self), BucketStorageUri) + and ((self.version_id is not None) + or (self.generation is not None))) + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + + def list_bucket(self, prefix='', delimiter='', headers=None, + all_versions=False): + self._check_bucket_uri('list_bucket') + bucket = self.get_bucket(headers=headers) + if all_versions: + return (v for v in bucket.list_versions( + prefix=prefix, delimiter=delimiter, headers=headers) + if not isinstance(v, DeleteMarker)) + else: + return bucket.list(prefix=prefix, delimiter=delimiter, + headers=headers) + + def get_all_keys(self, validate=False, headers=None, prefix=None): + bucket = self.get_bucket(validate, headers) + return bucket.get_all_keys(headers) + + def get_bucket(self, validate=False, headers=None): + self._check_bucket_uri('get_bucket') + conn = self.connect() + bucket = conn.get_bucket(self.bucket_name, validate, headers) + self.check_response(bucket, 'bucket', self.uri) + return bucket + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + key = bucket.get_key(self.object_name, headers, version_id) + self.check_response(key, 'key', self.uri) + return key + + def new_key(self, validate=False, headers=None): + self._check_object_uri('new_key') + bucket = self.get_bucket(validate, headers) + return bucket.new_key(self.object_name) + + def get_contents_to_stream(self, fp, headers=None, version_id=None): + self._check_object_uri('get_key') + self._warn_about_args('get_key', validate=False) + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_to_file(fp, headers, version_id=version_id) + + def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10, + torrent=False, version_id=None, + res_download_handler=None, response_headers=None, + hash_algs=None): + self._check_object_uri('get_contents_to_file') + key = self.get_key(None, headers) + self.check_response(key, 'key', self.uri) + if hash_algs: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers, + hash_algs=hash_algs) + else: + key.get_contents_to_file(fp, headers, cb, num_cb, torrent, + version_id, res_download_handler, + response_headers) + + def get_contents_as_string(self, validate=False, headers=None, cb=None, + num_cb=10, torrent=False, version_id=None): + self._check_object_uri('get_contents_as_string') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + return key.get_contents_as_string(headers, cb, num_cb, torrent, + version_id) + + def acl_class(self): + conn = self.connect() + acl_class = conn.provider.acl_class + self.check_response(acl_class, 
'acl_class', self.uri) + return acl_class + + def canned_acls(self): + conn = self.connect() + canned_acls = conn.provider.canned_acls + self.check_response(canned_acls, 'canned_acls', self.uri) + return canned_acls + + +class BucketStorageUri(StorageUri): + """ + StorageUri subclass that handles bucket storage providers. + Callers should instantiate this class by calling boto.storage_uri(). + """ + + delim = '/' + capabilities = set([]) # A set of additional capabilities. + + def __init__(self, scheme, bucket_name=None, object_name=None, + debug=0, connection_args=None, suppress_consec_slashes=True, + version_id=None, generation=None, is_latest=False): + """Instantiate a BucketStorageUri from scheme,bucket,object tuple. + + @type scheme: string + @param scheme: URI scheme naming the storage provider (gs, s3, etc.) + @type bucket_name: string + @param bucket_name: bucket name + @type object_name: string + @param object_name: object name, excluding generation/version. + @type debug: int + @param debug: debug level to pass in to connection (range 0..2) + @type connection_args: map + @param connection_args: optional map containing args to be + passed to {S3,GS}Connection constructor (e.g., to override + https_connection_factory). + @param suppress_consec_slashes: If provided, controls whether + consecutive slashes will be suppressed in key paths. + @param version_id: Object version id (S3-specific). + @param generation: Object generation number (GCS-specific). + @param is_latest: boolean indicating that a versioned object is the + current version + + After instantiation the components are available in the following + fields: scheme, bucket_name, object_name, version_id, generation, + is_latest, versionless_uri, version_specific_uri, uri. + Note: If instantiated without version info, the string representation + for a URI stays versionless; similarly, if instantiated with version + info, the string representation for a URI stays version-specific. If you + call one of the uri.set_contents_from_xyz() methods, a specific object + version will be created, and its version-specific URI string can be + retrieved from version_specific_uri even if the URI was instantiated + without version info. 
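+
+        A small sketch of the resulting URI strings (bucket and object
+        names are illustrative)::
+
+            >>> u = BucketStorageUri('gs', 'mybucket', 'obj', generation=5)
+            >>> u.versionless_uri
+            'gs://mybucket/obj'
+            >>> u.uri
+            'gs://mybucket/obj#5'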
+ """ + + self.scheme = scheme + self.bucket_name = bucket_name + self.object_name = object_name + self.debug = debug + if connection_args: + self.connection_args = connection_args + self.suppress_consec_slashes = suppress_consec_slashes + self.version_id = version_id + self.generation = generation and int(generation) + self.is_latest = is_latest + self.is_version_specific = bool(self.generation) or bool(version_id) + self._build_uri_strings() + + def _build_uri_strings(self): + if self.bucket_name and self.object_name: + self.versionless_uri = '%s://%s/%s' % (self.scheme, self.bucket_name, + self.object_name) + if self.generation: + self.version_specific_uri = '%s#%s' % (self.versionless_uri, + self.generation) + elif self.version_id: + self.version_specific_uri = '%s#%s' % ( + self.versionless_uri, self.version_id) + if self.is_version_specific: + self.uri = self.version_specific_uri + else: + self.uri = self.versionless_uri + elif self.bucket_name: + self.uri = ('%s://%s/' % (self.scheme, self.bucket_name)) + else: + self.uri = ('%s://' % self.scheme) + + def _update_from_key(self, key): + self._update_from_values( + getattr(key, 'version_id', None), + getattr(key, 'generation', None), + getattr(key, 'is_latest', None), + getattr(key, 'md5', None)) + + def _update_from_values(self, version_id, generation, is_latest, md5): + self.version_id = version_id + self.generation = generation + self.is_latest = is_latest + self._build_uri_strings() + self.md5 = md5 + + def get_key(self, validate=False, headers=None, version_id=None): + self._check_object_uri('get_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + key = bucket.get_key(self.object_name, headers, + version_id=(version_id or self.version_id)) + elif self.get_provider().name == 'google': + key = bucket.get_key(self.object_name, headers, + generation=self.generation) + self.check_response(key, 'key', self.uri) + return key + + def delete_key(self, validate=False, headers=None, version_id=None, + mfa_token=None): + self._check_object_uri('delete_key') + bucket = self.get_bucket(validate, headers) + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + return bucket.delete_key(self.object_name, headers, version_id, + mfa_token) + elif self.get_provider().name == 'google': + return bucket.delete_key(self.object_name, headers, + generation=self.generation) + + def clone_replace_name(self, new_name): + """Instantiate a BucketStorageUri from the current BucketStorageUri, + but replacing the object_name. + + @type new_name: string + @param new_name: new object name + """ + self._check_bucket_uri('clone_replace_name') + return BucketStorageUri( + self.scheme, bucket_name=self.bucket_name, object_name=new_name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes) + + def clone_replace_key(self, key): + """Instantiate a BucketStorageUri from the current BucketStorageUri, by + replacing the object name with the object name and other metadata found + in the given Key object (including generation). 
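+
+        A minimal sketch (assumes ``key`` was returned by a versioned
+        bucket listing)::
+
+            >>> versioned_uri = uri.clone_replace_key(key)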
+ + @type key: Key + @param key: key for the new StorageUri to represent + """ + self._check_bucket_uri('clone_replace_key') + version_id = None + generation = None + is_latest = False + if hasattr(key, 'version_id'): + version_id = key.version_id + if hasattr(key, 'generation'): + generation = key.generation + if hasattr(key, 'is_latest'): + is_latest = key.is_latest + + return BucketStorageUri( + key.provider.get_provider_name(), + bucket_name=key.bucket.name, + object_name=key.name, + debug=self.debug, + suppress_consec_slashes=self.suppress_consec_slashes, + version_id=version_id, + generation=generation, + is_latest=is_latest) + + def get_acl(self, validate=False, headers=None, version_id=None): + """returns a bucket's acl""" + self._check_bucket_uri('get_acl') + bucket = self.get_bucket(validate, headers) + # This works for both bucket- and object- level ACLs (former passes + # key_name=None): + key_name = self.object_name or '' + if self.get_provider().name == 'aws': + version_id = version_id or self.version_id + acl = bucket.get_acl(key_name, headers, version_id) + else: + acl = bucket.get_acl(key_name, headers, generation=self.generation) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_def_acl(self, validate=False, headers=None): + """returns a bucket's default object acl""" + self._check_bucket_uri('get_def_acl') + bucket = self.get_bucket(validate, headers) + acl = bucket.get_def_acl(headers) + self.check_response(acl, 'acl', self.uri) + return acl + + def get_cors(self, validate=False, headers=None): + """returns a bucket's CORS XML""" + self._check_bucket_uri('get_cors') + bucket = self.get_bucket(validate, headers) + cors = bucket.get_cors(headers) + self.check_response(cors, 'cors', self.uri) + return cors + + def set_cors(self, cors, validate=False, headers=None): + """sets or updates a bucket's CORS XML""" + self._check_bucket_uri('set_cors ') + bucket = self.get_bucket(validate, headers) + if self.scheme == 's3': + bucket.set_cors(cors, headers) + else: + bucket.set_cors(cors.to_xml(), headers) + + def get_location(self, validate=False, headers=None): + self._check_bucket_uri('get_location') + bucket = self.get_bucket(validate, headers) + return bucket.get_location() + + def get_storage_class(self, validate=False, headers=None): + self._check_bucket_uri('get_storage_class') + # StorageClass is defined as a bucket and object param for GCS, but + # only as a key param for S3. + if self.scheme != 'gs': + raise ValueError('get_storage_class() not supported for %s ' + 'URIs.' % self.scheme) + bucket = self.get_bucket(validate, headers) + return bucket.get_storage_class() + + def set_storage_class(self, storage_class, validate=False, headers=None): + """Updates a bucket's storage class.""" + self._check_bucket_uri('set_storage_class') + # StorageClass is defined as a bucket and object param for GCS, but + # only as a key param for S3. + if self.scheme != 'gs': + raise ValueError('set_storage_class() not supported for %s ' + 'URIs.' 
% self.scheme) + bucket = self.get_bucket(validate, headers) + bucket.set_storage_class(storage_class, headers) + + def get_subresource(self, subresource, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('get_subresource') + bucket = self.get_bucket(validate, headers) + return bucket.get_subresource(subresource, self.object_name, headers, + version_id) + + def add_group_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_group_email_grant') + if self.scheme != 'gs': + raise ValueError('add_group_email_grant() not supported for %s ' + 'URIs.' % self.scheme) + if self.object_name: + if recursive: + raise ValueError('add_group_email_grant() on key-ful URI cannot ' + 'specify recursive=True') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_group_email_grant(permission, email_address, headers) + elif self.bucket_name: + bucket = self.get_bucket(validate, headers) + bucket.add_group_email_grant(permission, email_address, recursive, + headers) + else: + raise InvalidUriError('add_group_email_grant() on bucket-less URI ' + '%s' % self.uri) + + def add_email_grant(self, permission, email_address, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_email_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_email_grant(permission, email_address, recursive, + headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_email_grant(permission, email_address) + + def add_user_grant(self, permission, user_id, recursive=False, + validate=False, headers=None): + self._check_bucket_uri('add_user_grant') + if not self.object_name: + bucket = self.get_bucket(validate, headers) + bucket.add_user_grant(permission, user_id, recursive, headers) + else: + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.add_user_grant(permission, user_id) + + def list_grants(self, headers=None): + self._check_bucket_uri('list_grants ') + bucket = self.get_bucket(headers) + return bucket.list_grants(headers) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return False + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return True + + def names_container(self): + """ + Returns True if this URI names a directory or bucket. Will return + False for bucket subdirs; providing bucket subdir semantics needs to + be done by the caller (like gsutil does). 
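+
+        For example (hypothetical URIs)::
+
+            >>> boto.storage_uri('gs://mybucket').names_container()
+            True
+            >>> boto.storage_uri('gs://mybucket/obj').names_container()
+            False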
+ """ + return bool(not self.object_name) + + def names_singleton(self): + """Returns True if this URI names a file or object.""" + return bool(self.object_name) + + def names_directory(self): + """Returns True if this URI names a directory.""" + return False + + def names_provider(self): + """Returns True if this URI names a provider.""" + return bool(not self.bucket_name) + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return bool(self.bucket_name) and bool(not self.object_name) + + def names_file(self): + """Returns True if this URI names a file.""" + return False + + def names_object(self): + """Returns True if this URI names an object.""" + return self.names_singleton() + + def is_stream(self): + """Returns True if this URI represents input/output stream.""" + return False + + def create_bucket(self, headers=None, location='', policy=None, + storage_class=None): + self._check_bucket_uri('create_bucket ') + conn = self.connect() + # Pass storage_class param only if this is a GCS bucket. (In S3 the + # storage class is specified on the key object.) + if self.scheme == 'gs': + return conn.create_bucket(self.bucket_name, headers, location, policy, + storage_class) + else: + return conn.create_bucket(self.bucket_name, headers, location, policy) + + def delete_bucket(self, headers=None): + self._check_bucket_uri('delete_bucket') + conn = self.connect() + return conn.delete_bucket(self.bucket_name, headers) + + def get_all_buckets(self, headers=None): + conn = self.connect() + return conn.get_all_buckets(headers) + + def get_provider(self): + conn = self.connect() + provider = conn.provider + self.check_response(provider, 'provider', self.uri) + return provider + + def set_acl(self, acl_or_str, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL.""" + self._check_bucket_uri('set_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_acl( + acl_or_str, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_acl(acl_or_str, key_name, headers, version_id) + + def set_xml_acl(self, xmlstring, key_name='', validate=False, headers=None, + version_id=None, if_generation=None, if_metageneration=None): + """Sets or updates a bucket's ACL with an XML string.""" + self._check_bucket_uri('set_xml_acl') + key_name = key_name or self.object_name or '' + bucket = self.get_bucket(validate, headers) + if self.generation: + bucket.set_xml_acl( + xmlstring, key_name, headers, generation=self.generation, + if_generation=if_generation, if_metageneration=if_metageneration) + else: + version_id = version_id or self.version_id + bucket.set_xml_acl(xmlstring, key_name, headers, + version_id=version_id) + + def set_def_xml_acl(self, xmlstring, validate=False, headers=None): + """Sets or updates a bucket's default object ACL with an XML string.""" + self._check_bucket_uri('set_def_xml_acl') + self.get_bucket(validate, headers).set_def_xml_acl(xmlstring, headers) + + def set_def_acl(self, acl_or_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object ACL.""" + self._check_bucket_uri('set_def_acl') + self.get_bucket(validate, headers).set_def_acl(acl_or_str, headers) + + def set_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): 
+ """Sets or updates a bucket's acl to a predefined (canned) value.""" + self._check_object_uri('set_canned_acl') + self._warn_about_args('set_canned_acl', version_id=version_id) + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_canned_acl(acl_str, headers) + + def set_def_canned_acl(self, acl_str, validate=False, headers=None, + version_id=None): + """Sets or updates a bucket's default object acl to a predefined + (canned) value.""" + self._check_bucket_uri('set_def_canned_acl ') + key = self.get_key(validate, headers) + self.check_response(key, 'key', self.uri) + key.set_def_canned_acl(acl_str, headers, version_id) + + def set_subresource(self, subresource, value, validate=False, headers=None, + version_id=None): + self._check_bucket_uri('set_subresource') + bucket = self.get_bucket(validate, headers) + bucket.set_subresource(subresource, value, self.object_name, headers, + version_id) + + def set_contents_from_string(self, s, headers=None, replace=True, + cb=None, num_cb=10, policy=None, md5=None, + reduced_redundancy=False): + self._check_object_uri('set_contents_from_string') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + if reduced_redundancy: + sys.stderr.write('Warning: GCS does not support ' + 'reduced_redundancy; argument ignored by ' + 'set_contents_from_string') + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5) + else: + result = key.set_contents_from_string( + s, headers, replace, cb, num_cb, policy, md5, + reduced_redundancy) + self._update_from_key(key) + return result + + def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, + num_cb=10, policy=None, md5=None, size=None, + rewind=False, res_upload_handler=None): + self._check_object_uri('set_contents_from_file') + key = self.new_key(headers=headers) + if self.scheme == 'gs': + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind, res_upload_handler=res_upload_handler) + if res_upload_handler: + self._update_from_values(None, res_upload_handler.generation, + None, md5) + else: + self._warn_about_args('set_contents_from_file', + res_upload_handler=res_upload_handler) + result = key.set_contents_from_file( + fp, headers, replace, cb, num_cb, policy, md5, size=size, + rewind=rewind) + self._update_from_key(key) + return result + + def set_contents_from_stream(self, fp, headers=None, replace=True, cb=None, + policy=None, reduced_redundancy=False): + self._check_object_uri('set_contents_from_stream') + dst_key = self.new_key(False, headers) + result = dst_key.set_contents_from_stream( + fp, headers, replace, cb, policy=policy, + reduced_redundancy=reduced_redundancy) + self._update_from_key(dst_key) + return result + + def copy_key(self, src_bucket_name, src_key_name, metadata=None, + src_version_id=None, storage_class='STANDARD', + preserve_acl=False, encrypt_key=False, headers=None, + query_args=None, src_generation=None): + """Returns newly created key.""" + self._check_object_uri('copy_key') + dst_bucket = self.get_bucket(validate=False, headers=headers) + if src_generation: + return dst_bucket.copy_key( + new_key_name=self.object_name, + src_bucket_name=src_bucket_name, + src_key_name=src_key_name, metadata=metadata, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args, + src_generation=src_generation) + else: + return dst_bucket.copy_key( + new_key_name=self.object_name, + 
src_bucket_name=src_bucket_name, src_key_name=src_key_name, + metadata=metadata, src_version_id=src_version_id, + storage_class=storage_class, preserve_acl=preserve_acl, + encrypt_key=encrypt_key, headers=headers, query_args=query_args) + + def enable_logging(self, target_bucket, target_prefix=None, validate=False, + headers=None, version_id=None): + self._check_bucket_uri('enable_logging') + bucket = self.get_bucket(validate, headers) + bucket.enable_logging(target_bucket, target_prefix, headers=headers) + + def disable_logging(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('disable_logging') + bucket = self.get_bucket(validate, headers) + bucket.disable_logging(headers=headers) + + def get_logging_config(self, validate=False, headers=None, version_id=None): + self._check_bucket_uri('get_logging_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_logging_config(headers=headers) + + def set_website_config(self, main_page_suffix=None, error_key=None, + validate=False, headers=None): + self._check_bucket_uri('set_website_config') + bucket = self.get_bucket(validate, headers) + if not (main_page_suffix or error_key): + bucket.delete_website_configuration(headers) + else: + bucket.configure_website(main_page_suffix, error_key, headers) + + def get_website_config(self, validate=False, headers=None): + self._check_bucket_uri('get_website_config') + bucket = self.get_bucket(validate, headers) + return bucket.get_website_configuration(headers) + + def get_versioning_config(self, headers=None): + self._check_bucket_uri('get_versioning_config') + bucket = self.get_bucket(False, headers) + return bucket.get_versioning_status(headers) + + def configure_versioning(self, enabled, headers=None): + self._check_bucket_uri('configure_versioning') + bucket = self.get_bucket(False, headers) + return bucket.configure_versioning(enabled, headers) + + def set_metadata(self, metadata_plus, metadata_minus, preserve_acl, + headers=None): + return self.get_key(False).set_remote_metadata(metadata_plus, + metadata_minus, + preserve_acl, + headers=headers) + + def compose(self, components, content_type=None, headers=None): + self._check_object_uri('compose') + component_keys = [] + for suri in components: + component_keys.append(suri.new_key()) + component_keys[-1].generation = suri.generation + self.generation = self.new_key().compose( + component_keys, content_type=content_type, headers=headers) + self._build_uri_strings() + return self + + def get_lifecycle_config(self, validate=False, headers=None): + """Returns a bucket's lifecycle configuration.""" + self._check_bucket_uri('get_lifecycle_config') + bucket = self.get_bucket(validate, headers) + lifecycle_config = bucket.get_lifecycle_config(headers) + self.check_response(lifecycle_config, 'lifecycle', self.uri) + return lifecycle_config + + def configure_lifecycle(self, lifecycle_config, validate=False, + headers=None): + """Sets or updates a bucket's lifecycle configuration.""" + self._check_bucket_uri('configure_lifecycle') + bucket = self.get_bucket(validate, headers) + bucket.configure_lifecycle(lifecycle_config, headers) + + def exists(self, headers=None): + """Returns True if the object exists or False if it doesn't""" + if not self.object_name: + raise InvalidUriError('exists on object-less URI (%s)' % self.uri) + bucket = self.get_bucket() + key = bucket.get_key(self.object_name, headers=headers) + return bool(key) + + +class FileStorageUri(StorageUri): + """ + StorageUri subclass that handles files in 
the local file system. + Callers should instantiate this class by calling boto.storage_uri(). + + See file/README about how we map StorageUri operations onto a file system. + """ + + delim = os.sep + + def __init__(self, object_name, debug, is_stream=False): + """Instantiate a FileStorageUri from a path name. + + @type object_name: string + @param object_name: object name + @type debug: boolean + @param debug: whether to enable debugging on this StorageUri + + After instantiation the components are available in the following + fields: uri, scheme, bucket_name (always blank for this "anonymous" + bucket), object_name. + """ + + self.scheme = 'file' + self.bucket_name = '' + self.object_name = object_name + self.uri = 'file://' + object_name + self.debug = debug + self.stream = is_stream + + def clone_replace_name(self, new_name): + """Instantiate a FileStorageUri from the current FileStorageUri, + but replacing the object_name. + + @type new_name: string + @param new_name: new object name + """ + return FileStorageUri(new_name, self.debug, self.stream) + + def is_file_uri(self): + """Returns True if this URI names a file or directory.""" + return True + + def is_cloud_uri(self): + """Returns True if this URI names a bucket or object.""" + return False + + def names_container(self): + """Returns True if this URI names a directory or bucket.""" + return self.names_directory() + + def names_singleton(self): + """Returns True if this URI names a file (or stream) or object.""" + return not self.names_container() + + def names_directory(self): + """Returns True if this URI names a directory.""" + if self.stream: + return False + return os.path.isdir(self.object_name) + + def names_provider(self): + """Returns True if this URI names a provider.""" + return False + + def names_bucket(self): + """Returns True if this URI names a bucket.""" + return False + + def names_file(self): + """Returns True if this URI names a file.""" + return self.names_singleton() + + def names_object(self): + """Returns True if this URI names an object.""" + return False + + def is_stream(self): + """Returns True if this URI represents input/output stream. + """ + return bool(self.stream) + + def close(self): + """Closes the underlying file. + """ + self.get_key().close() + + def exists(self, _headers_not_used=None): + """Returns True if the file exists or False if it doesn't""" + # The _headers_not_used parameter is ignored. It is only there to ensure + # that this method's signature is identical to the exists method on the + # BucketStorageUri class. + return os.path.exists(self.object_name) diff --git a/ext/boto/sts/__init__.py b/ext/boto/sts/__init__.py new file mode 100644 index 0000000000..1af5a148fb --- /dev/null +++ b/ext/boto/sts/__init__.py @@ -0,0 +1,51 @@ +# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010-2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sts.connection import STSConnection +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the STS service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` instances + """ + return get_regions('sts', connection_cls=STSConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.sts.connection.STSConnection`. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.sts.connection.STSConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('sts', region_name, connection_cls=STSConnection, + **kw_params) diff --git a/ext/boto/sts/connection.py b/ext/boto/sts/connection.py new file mode 100644 index 0000000000..8c0cf4b269 --- /dev/null +++ b/ext/boto/sts/connection.py @@ -0,0 +1,652 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.connection import AWSQueryConnection +from boto.provider import Provider, NO_CREDENTIALS_PROVIDED +from boto.regioninfo import RegionInfo +from boto.sts.credentials import Credentials, FederationToken, AssumedRole +from boto.sts.credentials import DecodeAuthorizationMessage +import boto +import boto.utils +import datetime +import threading + +_session_token_cache = {} + + +class STSConnection(AWSQueryConnection): + """ + AWS Security Token Service + The AWS Security Token Service is a web service that enables you + to request temporary, limited-privilege credentials for AWS + Identity and Access Management (IAM) users or for users that you + authenticate (federated users). This guide provides descriptions + of the AWS Security Token Service API. 
+ + For more detailed information about using this service, go to + `Using Temporary Security Credentials`_. + + For information about setting up signatures and authorization + through the API, go to `Signing AWS API Requests`_ in the AWS + General Reference . For general information about the Query API, + go to `Making Query Requests`_ in Using IAM . For information + about using security tokens with other AWS products, go to `Using + Temporary Security Credentials to Access AWS`_ in Using Temporary + Security Credentials . + + If you're new to AWS and need additional technical information + about a specific AWS product, you can find the product's technical + documentation at `http://aws.amazon.com/documentation/`_. + + We will refer to Amazon Identity and Access Management using the + abbreviated form IAM. All copyrights and legal protections still + apply. + """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sts.amazonaws.com' + APIVersion = '2011-06-15' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', + converter=None, validate_certs=True, anon=False, + security_token=None, profile_name=None): + """ + :type anon: boolean + :param anon: If this parameter is True, the ``STSConnection`` object + will make anonymous requests, and it will not use AWS + Credentials or even search for AWS Credentials to make these + requests. + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint, + connection_cls=STSConnection) + self.region = region + self.anon = anon + self._mutex = threading.Semaphore() + provider = 'aws' + # If an anonymous request is sent, do not try to look for credentials. + # So we pass in dummy values for the access key id, secret access + # key, and session token. It does not matter that they are + # not actual values because the request is anonymous. + if self.anon: + provider = Provider('aws', NO_CREDENTIALS_PROVIDED, + NO_CREDENTIALS_PROVIDED, + NO_CREDENTIALS_PROVIDED) + super(STSConnection, self).__init__(aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path, + validate_certs=validate_certs, + security_token=security_token, + profile_name=profile_name, + provider=provider) + + def _required_auth_capability(self): + if self.anon: + return ['sts-anon'] + else: + return ['hmac-v4'] + + def _check_token_cache(self, token_key, duration=None, window_seconds=60): + token = _session_token_cache.get(token_key, None) + if token: + now = datetime.datetime.utcnow() + expires = boto.utils.parse_ts(token.expiration) + delta = expires - now + if delta < datetime.timedelta(seconds=window_seconds): + msg = 'Cached session token %s is expired' % token_key + boto.log.debug(msg) + token = None + return token + + def _get_session_token(self, duration=None, + mfa_serial_number=None, mfa_token=None): + params = {} + if duration: + params['DurationSeconds'] = duration + if mfa_serial_number: + params['SerialNumber'] = mfa_serial_number + if mfa_token: + params['TokenCode'] = mfa_token + return self.get_object('GetSessionToken', params, + Credentials, verb='POST') + + def get_session_token(self, duration=None, force_new=False, + mfa_serial_number=None, mfa_token=None): + """ + Return a valid session token. 
Because retrieving new tokens
+        from the Secure Token Service is a fairly heavyweight operation,
+        this module caches previously retrieved tokens and returns
+        them when appropriate. Each token is cached with a key
+        consisting of the region name of the STS endpoint
+        concatenated with the requesting user's access id. If there
+        is a token in the cache matching this key, the session
+        expiration is checked to make sure it is still valid and if
+        so, the cached token is returned. Otherwise, a new session
+        token is requested from STS and it is placed into the cache
+        and returned.
+
+        :type duration: int
+        :param duration: The number of seconds the credentials should
+            remain valid.
+
+        :type force_new: bool
+        :param force_new: If this parameter is True, a new session token
+            will be retrieved from the Secure Token Service regardless
+            of whether there is a valid cached token or not.
+
+        :type mfa_serial_number: str
+        :param mfa_serial_number: The serial number of an MFA device.
+            If this is provided and if the mfa_token provided is
+            valid, the temporary session token will be authorized to
+            perform operations requiring MFA device authentication.
+
+        :type mfa_token: str
+        :param mfa_token: The 6 digit token associated with the
+            MFA device.
+        """
+        token_key = '%s:%s' % (self.region.name, self.provider.access_key)
+        token = self._check_token_cache(token_key, duration)
+        if force_new or not token:
+            boto.log.debug('fetching a new token for %s' % token_key)
+            try:
+                self._mutex.acquire()
+                token = self._get_session_token(duration,
+                                                mfa_serial_number,
+                                                mfa_token)
+                _session_token_cache[token_key] = token
+            finally:
+                self._mutex.release()
+        return token
+
+    def get_federation_token(self, name, duration=None, policy=None):
+        """
+        Returns a set of temporary security credentials (consisting of
+        an access key ID, a secret access key, and a security token)
+        for a federated user. A typical use is in a proxy application
+        that is getting temporary security credentials on behalf of
+        distributed applications inside a corporate network. Because
+        you must call the `GetFederationToken` action using the long-
+        term security credentials of an IAM user, this call is
+        appropriate in contexts where those credentials can be safely
+        stored, usually in a server-based application.
+
+        **Note:** Do not use this call in mobile applications or
+        client-based web applications that directly get temporary
+        security credentials. For those types of applications, use
+        `AssumeRoleWithWebIdentity`.
+
+        The `GetFederationToken` action must be called by using the
+        long-term AWS security credentials of the AWS account or an
+        IAM user. Credentials that are created by IAM users are valid
+        for the specified duration, between 900 seconds (15 minutes)
+        and 129600 seconds (36 hours); credentials that are created by
+        using account credentials have a maximum duration of 3600
+        seconds (1 hour).
+
+        The permissions that are granted to the federated user are the
+        intersection of the policy that is passed with the
+        `GetFederationToken` request and policies that are associated
+        with the entity making the `GetFederationToken` call.
+
+        For more information about how permissions work, see
+        `Controlling Permissions in Temporary Credentials`_ in Using
+        Temporary Security Credentials . For information about using
+        `GetFederationToken` to create temporary security credentials,
+        see `Creating Temporary Credentials to Enable Access for
+        Federated Users`_ in Using Temporary Security Credentials .
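+
+        A minimal usage sketch (assumes ``sts`` is an STSConnection; the
+        name and duration are illustrative)::
+
+            >>> token = sts.get_federation_token('Bob', duration=3600)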
+
+        :type name: string
+        :param name: The name of the federated user. The name is used as an
+            identifier for the temporary security credentials (such as `Bob`).
+            For example, you can reference the federated user name in a
+            resource-based policy, such as in an Amazon S3 bucket policy.
+
+        :type policy: string
+        :param policy: A policy that specifies the permissions that are granted
+            to the federated user. By default, federated users have no
+            permissions; they do not inherit any from the IAM user. When you
+            specify a policy, the federated user's permissions are the
+            intersection of the specified policy and the IAM user's policy. If
+            you don't specify a policy, federated users can only access AWS
+            resources that explicitly allow those federated users in a
+            resource policy, such as in an Amazon S3 bucket policy.
+
+        :type duration: integer
+        :param duration: The duration, in seconds, that the session
+            should last. Acceptable durations for federation sessions range
+            from 900 seconds (15 minutes) to 129600 seconds (36 hours), with
+            43200 seconds (12 hours) as the default. Sessions for AWS account
+            owners are restricted to a maximum of 3600 seconds (one hour). If
+            the duration is longer than one hour, the session for AWS account
+            owners defaults to one hour.
+
+        """
+        params = {'Name': name}
+        if duration:
+            params['DurationSeconds'] = duration
+        if policy:
+            params['Policy'] = policy
+        return self.get_object('GetFederationToken', params,
+                               FederationToken, verb='POST')
+
+    def assume_role(self, role_arn, role_session_name, policy=None,
+                    duration_seconds=None, external_id=None,
+                    mfa_serial_number=None,
+                    mfa_token=None):
+        """
+        Returns a set of temporary security credentials (consisting of
+        an access key ID, a secret access key, and a security token)
+        that you can use to access AWS resources that you might not
+        normally have access to. Typically, you use `AssumeRole` for
+        cross-account access or federation.
+
+        For cross-account access, imagine that you own multiple
+        accounts and need to access resources in each account. You
+        could create long-term credentials in each account to access
+        those resources. However, managing all those credentials and
+        remembering which one can access which account can be time
+        consuming. Instead, you can create one set of long-term
+        credentials in one account and then use temporary security
+        credentials to access all the other accounts by assuming roles
+        in those accounts. For more information about roles, see
+        `Roles`_ in Using IAM .
+
+        For federation, you can, for example, grant single sign-on
+        access to the AWS Management Console. If you already have an
+        identity and authentication system in your corporate network,
+        you don't have to recreate user identities in AWS in order to
+        grant those user identities access to AWS. Instead, after a
+        user has been authenticated, you call `AssumeRole` (and
+        specify the role with the appropriate permissions) to get
+        temporary security credentials for that user. With those
+        temporary security credentials, you construct a sign-in URL
+        that users can use to access the console. For more
+        information, see `Scenarios for Granting Temporary Access`_ in
+        AWS Security Token Service .
+
+        The temporary security credentials are valid for the duration
+        that you specified when calling `AssumeRole`, which can be
+        from 900 seconds (15 minutes) to 3600 seconds (1 hour). The
+        default is 1 hour.
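+
+        A minimal usage sketch (the role ARN and session name are
+        illustrative)::
+
+            >>> role = sts.assume_role(
+            ...     'arn:aws:iam::123456789012:role/demo', 'demo-session')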
+
+        The temporary security credentials that are returned from the
+        `AssumeRole` response have the permissions that
+        are associated with the access policy of the role being
+        assumed and any policies that are associated with the AWS
+        resource being accessed. You can further restrict the
+        permissions of the temporary security credentials by passing a
+        policy in the request. The resulting permissions are an
+        intersection of the role's access policy and the policy that
+        you passed. These policies and any applicable resource-based
+        policies are evaluated when calls to AWS service APIs are made
+        using the temporary security credentials.
+
+        To assume a role, your AWS account must be trusted by the
+        role. The trust relationship is defined in the role's trust
+        policy when the IAM role is created. You must also have a
+        policy that allows you to call `sts:AssumeRole`.
+
+        **Important:** You cannot call `AssumeRole` by using AWS
+        account credentials; access will be denied. You must use IAM
+        user credentials to call `AssumeRole`.
+
+        :type role_arn: string
+        :param role_arn: The Amazon Resource Name (ARN) of the role that the
+            caller is assuming.
+
+        :type role_session_name: string
+        :param role_session_name: An identifier for the assumed role session.
+            The session name is included as part of the `AssumedRoleUser`.
+
+        :type policy: string
+        :param policy: A supplemental policy that is associated with the
+            temporary security credentials from the `AssumeRole` call. The
+            resulting permissions of the temporary security credentials are an
+            intersection of this policy and the access policy that is
+            associated with the role. Use this policy to further restrict the
+            permissions of the temporary security credentials.
+
+        :type duration_seconds: integer
+        :param duration_seconds: The duration, in seconds, of the role session.
+            The value can range from 900 seconds (15 minutes) to 3600 seconds
+            (1 hour). By default, the value is set to 3600 seconds.
+
+        :type external_id: string
+        :param external_id: A unique identifier that is used by third parties
+            to assume a role in their customers' accounts. For each role that
+            the third party can assume, they should instruct their customers to
+            create a role with the external ID that the third party generated.
+            Each time the third party assumes the role, they must pass the
+            customer's external ID. The external ID is useful in order to help
+            third parties bind a role to the customer who created it. For more
+            information about the external ID, see `About the External ID`_ in
+            Using Temporary Security Credentials.
+
+        :type mfa_serial_number: string
+        :param mfa_serial_number: The identification number of the MFA device
+            that is associated with the user who is making the AssumeRole call.
+            Specify this value if the trust policy of the role being assumed
+            includes a condition that requires MFA authentication. The value is
+            either the serial number for a hardware device (such as
+            GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
+            (such as arn:aws:iam::123456789012:mfa/user). Minimum length of 9.
+            Maximum length of 256.
+
+        :type mfa_token: string
+        :param mfa_token: The value provided by the MFA device, if the trust
+            policy of the role being assumed requires MFA (that is, if the
+            policy includes a condition that tests for MFA). If the role being
+            assumed requires MFA and if the TokenCode value is missing or
+            expired, the AssumeRole call returns an "access denied" error.
+            Minimum length of 6.
Maximum length of 6.
+
+        """
+        params = {
+            'RoleArn': role_arn,
+            'RoleSessionName': role_session_name
+        }
+        if policy is not None:
+            params['Policy'] = policy
+        if duration_seconds is not None:
+            params['DurationSeconds'] = duration_seconds
+        if external_id is not None:
+            params['ExternalId'] = external_id
+        if mfa_serial_number is not None:
+            params['SerialNumber'] = mfa_serial_number
+        if mfa_token is not None:
+            params['TokenCode'] = mfa_token
+        return self.get_object('AssumeRole', params, AssumedRole, verb='POST')
+
+    def assume_role_with_saml(self, role_arn, principal_arn, saml_assertion,
+                              policy=None, duration_seconds=None):
+        """
+        Returns a set of temporary security credentials for users who
+        have been authenticated via a SAML authentication response.
+        This operation provides a mechanism for tying an enterprise
+        identity store or directory to role-based AWS access without
+        user-specific credentials or configuration.
+
+        The temporary security credentials returned by this operation
+        consist of an access key ID, a secret access key, and a
+        security token. Applications can use these temporary security
+        credentials to sign calls to AWS services. The credentials are
+        valid for the duration that you specified when calling
+        `AssumeRoleWithSAML`, which can be up to 3600 seconds (1 hour)
+        or until the time specified in the SAML authentication
+        response's `NotOnOrAfter` value, whichever is shorter.
+
+        The maximum duration for a session is 1 hour, and the minimum
+        duration is 15 minutes, even if values outside this range are
+        specified.
+
+        Optionally, you can pass an AWS IAM access policy to this
+        operation. The temporary security credentials that are
+        returned by the operation have the permissions that are
+        associated with the access policy of the role being assumed,
+        except for any permissions explicitly denied by the policy you
+        pass. This gives you a way to further restrict the permissions
+        for the federated user. These policies and any applicable
+        resource-based policies are evaluated when calls to AWS are
+        made using the temporary security credentials.
+
+        Before your application can call `AssumeRoleWithSAML`, you
+        must configure your SAML identity provider (IdP) to issue the
+        claims required by AWS. Additionally, you must use AWS
+        Identity and Access Management (AWS IAM) to create a SAML
+        provider entity in your AWS account that represents your
+        identity provider, and create an AWS IAM role that specifies
+        this SAML provider in its trust policy.
+
+        Calling `AssumeRoleWithSAML` does not require the use of AWS
+        security credentials. The identity of the caller is validated
+        by using keys in the metadata document that is uploaded for
+        the SAML provider entity for your identity provider.
+
+        For more information, see the following resources:
+
+
+        + `Creating Temporary Security Credentials for SAML
+          Federation`_ in the Using Temporary Security Credentials
+          guide.
+        + `SAML Providers`_ in the Using IAM guide.
+        + `Configuring a Relying Party and Claims`_ in the Using IAM
+          guide.
+        + `Creating a Role for SAML-Based Federation`_ in the Using
+          IAM guide.
+
+        :type role_arn: string
+        :param role_arn: The Amazon Resource Name (ARN) of the role that the
+            caller is assuming.
+
+        :type principal_arn: string
+        :param principal_arn: The Amazon Resource Name (ARN) of the SAML
+            provider in AWS IAM that describes the IdP.
+
+        :type saml_assertion: string
+        :param saml_assertion: The base-64 encoded SAML authentication response
+            provided by the IdP.
+            For more information, see `Configuring a Relying Party and Adding
+            Claims`_ in the Using IAM guide.
+
+        :type policy: string
+        :param policy:
+            An AWS IAM policy in JSON format.
+
+            The temporary security credentials that are returned by this
+            operation have the permissions that are associated with the access
+            policy of the role being assumed, except for any permissions
+            explicitly denied by the policy you pass. These policies and any
+            applicable resource-based policies are evaluated when calls to AWS
+            are made using the temporary security credentials.
+
+            The policy must be 2048 bytes or shorter, and its packed size must
+            be less than 450 bytes.
+
+        :type duration_seconds: integer
+        :param duration_seconds:
+            The duration, in seconds, of the role session. The value can range
+            from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default,
+            the value is set to 3600 seconds. An expiration can also be
+            specified in the SAML authentication response's `NotOnOrAfter`
+            value. The actual expiration time is whichever value is shorter.
+
+            The maximum duration for a session is 1 hour, and the minimum
+            duration is 15 minutes, even if values outside this range are
+            specified.
+
+        """
+        params = {
+            'RoleArn': role_arn,
+            'PrincipalArn': principal_arn,
+            'SAMLAssertion': saml_assertion,
+        }
+        if policy is not None:
+            params['Policy'] = policy
+        if duration_seconds is not None:
+            params['DurationSeconds'] = duration_seconds
+        return self.get_object('AssumeRoleWithSAML', params, AssumedRole,
+                               verb='POST')
+
+    def assume_role_with_web_identity(self, role_arn, role_session_name,
+                                      web_identity_token, provider_id=None,
+                                      policy=None, duration_seconds=None):
+        """
+        Returns a set of temporary security credentials for users who
+        have been authenticated in a mobile or web application with a
+        web identity provider, such as Login with Amazon, Facebook, or
+        Google. `AssumeRoleWithWebIdentity` is an API call that does
+        not require the use of AWS security credentials. Therefore,
+        you can distribute an application (for example, on mobile
+        devices) that requests temporary security credentials without
+        including long-term AWS credentials in the application, and
+        without deploying server-based proxy services that use
+        long-term AWS credentials. For more information, see `Creating
+        a Mobile Application with Third-Party Sign-In`_ in AWS
+        Security Token Service.
+
+        The temporary security credentials consist of an access key
+        ID, a secret access key, and a security token. Applications
+        can use these temporary security credentials to sign calls to
+        AWS service APIs. The credentials are valid for the duration
+        that you specified when calling `AssumeRoleWithWebIdentity`,
+        which can be from 900 seconds (15 minutes) to 3600 seconds (1
+        hour). By default, the temporary security credentials are
+        valid for 1 hour.
+
+        The temporary security credentials that are returned from the
+        `AssumeRoleWithWebIdentity` response have the permissions that
+        are associated with the access policy of the role being
+        assumed. You can further restrict the permissions of the
+        temporary security credentials by passing a policy in the
+        request. The resulting permissions are an intersection of the
+        role's access policy and the policy that you passed. These
+        policies and any applicable resource-based policies are
+        evaluated when calls to AWS service APIs are made using the
+        temporary security credentials.
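+
+        A minimal sketch (the ARN and the ``token_from_provider``
+        variable are placeholders; ``sts`` is assumed to be an STS
+        connection, and no AWS credentials are needed for this call)::
+
+            role = sts.assume_role_with_web_identity(
+                role_arn='arn:aws:iam::123456789012:role/app-user',
+                role_session_name='app-user-session',
+                web_identity_token=token_from_provider,
+                provider_id='graph.facebook.com')
+            # role.credentials holds the temporary keys for this user.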
+ + Before your application can call `AssumeRoleWithWebIdentity`, + you must have an identity token from a supported identity + provider and create a role that the application can assume. + The role that your application assumes must trust the identity + provider that is associated with the identity token. In other + words, the identity provider must be specified in the role's + trust policy. For more information, see ` Creating Temporary + Security Credentials for Mobile Apps Using Third-Party + Identity Providers`_. + + :type role_arn: string + :param role_arn: The Amazon Resource Name (ARN) of the role that the + caller is assuming. + + :type role_session_name: string + :param role_session_name: An identifier for the assumed role session. + Typically, you pass the name or identifier that is associated with + the user who is using your application. That way, the temporary + security credentials that your application will use are associated + with that user. This session name is included as part of the ARN + and assumed role ID in the `AssumedRoleUser` response element. + + :type web_identity_token: string + :param web_identity_token: The OAuth 2.0 access token or OpenID Connect + ID token that is provided by the identity provider. Your + application must get this token by authenticating the user who is + using your application with a web identity provider before the + application makes an `AssumeRoleWithWebIdentity` call. + + :type provider_id: string + :param provider_id: Specify this value only for OAuth access tokens. Do + not specify this value for OpenID Connect ID tokens, such as + `accounts.google.com`. This is the fully-qualified host component + of the domain name of the identity provider. Do not include URL + schemes and port numbers. Currently, `www.amazon.com` and + `graph.facebook.com` are supported. + + :type policy: string + :param policy: A supplemental policy that is associated with the + temporary security credentials from the `AssumeRoleWithWebIdentity` + call. The resulting permissions of the temporary security + credentials are an intersection of this policy and the access + policy that is associated with the role. Use this policy to further + restrict the permissions of the temporary security credentials. + + :type duration_seconds: integer + :param duration_seconds: The duration, in seconds, of the role session. + The value can range from 900 seconds (15 minutes) to 3600 seconds + (1 hour). By default, the value is set to 3600 seconds. + + """ + params = { + 'RoleArn': role_arn, + 'RoleSessionName': role_session_name, + 'WebIdentityToken': web_identity_token, + } + if provider_id is not None: + params['ProviderId'] = provider_id + if policy is not None: + params['Policy'] = policy + if duration_seconds is not None: + params['DurationSeconds'] = duration_seconds + return self.get_object( + 'AssumeRoleWithWebIdentity', + params, + AssumedRole, + verb='POST' + ) + + def decode_authorization_message(self, encoded_message): + """ + Decodes additional information about the authorization status + of a request from an encoded message returned in response to + an AWS request. + + For example, if a user is not authorized to perform an action + that he or she has requested, the request returns a + `Client.UnauthorizedOperation` response (an HTTP 403 + response). Some AWS actions additionally return an encoded + message that can provide details about this authorization + failure. + Only certain AWS actions return an encoded authorization + message. 
The documentation for an individual action indicates + whether that action returns an encoded message in addition to + returning an HTTP code. + The message is encoded because the details of the + authorization status can constitute privileged information + that the user who requested the action should not see. To + decode an authorization status message, a user must be granted + permissions via an IAM policy to request the + `DecodeAuthorizationMessage` ( + `sts:DecodeAuthorizationMessage`) action. + + The decoded message includes the following type of + information: + + + + Whether the request was denied due to an explicit deny or + due to the absence of an explicit allow. For more information, + see `Determining Whether a Request is Allowed or Denied`_ in + Using IAM . + + The principal who made the request. + + The requested action. + + The requested resource. + + The values of condition keys in the context of the user's + request. + + :type encoded_message: string + :param encoded_message: The encoded message that was returned with the + response. + + """ + params = { + 'EncodedMessage': encoded_message, + } + return self.get_object( + 'DecodeAuthorizationMessage', + params, + DecodeAuthorizationMessage, + verb='POST' + ) diff --git a/ext/boto/sts/credentials.py b/ext/boto/sts/credentials.py new file mode 100644 index 0000000000..7ab631942c --- /dev/null +++ b/ext/boto/sts/credentials.py @@ -0,0 +1,237 @@ +# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2011, Eucalyptus Systems, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import os +import datetime + +import boto.utils +from boto.compat import json + + +class Credentials(object): + """ + :ivar access_key: The AccessKeyID. + :ivar secret_key: The SecretAccessKey. + :ivar session_token: The session token that must be passed with + requests to use the temporary credentials + :ivar expiration: The timestamp for when the credentials will expire + """ + + def __init__(self, parent=None): + self.parent = parent + self.access_key = None + self.secret_key = None + self.session_token = None + self.expiration = None + self.request_id = None + + @classmethod + def from_json(cls, json_doc): + """ + Create and return a new Session Token based on the contents + of a JSON document. + + :type json_doc: str + :param json_doc: A string containing a JSON document with a + previously saved Credentials object. 
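+
+        A round-trip sketch (``creds`` is assumed to be a previously
+        obtained Credentials instance)::
+
+            doc = json.dumps(creds.to_dict())
+            restored = Credentials.from_json(doc)
+            assert restored.access_key == creds.access_key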
+        """
+        d = json.loads(json_doc)
+        token = cls()
+        token.__dict__.update(d)
+        return token
+
+    @classmethod
+    def load(cls, file_path):
+        """
+        Create and return a new Session Token based on the contents
+        of a previously saved JSON-format file.
+
+        :type file_path: str
+        :param file_path: The fully qualified path to the JSON-format
+            file containing the previously saved Session Token information.
+        """
+        fp = open(file_path)
+        json_doc = fp.read()
+        fp.close()
+        return cls.from_json(json_doc)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'AccessKeyId':
+            self.access_key = value
+        elif name == 'SecretAccessKey':
+            self.secret_key = value
+        elif name == 'SessionToken':
+            self.session_token = value
+        elif name == 'Expiration':
+            self.expiration = value
+        elif name == 'RequestId':
+            self.request_id = value
+        else:
+            pass
+
+    def to_dict(self):
+        """
+        Return a Python dict containing the important information
+        about this Session Token.
+        """
+        return {'access_key': self.access_key,
+                'secret_key': self.secret_key,
+                'session_token': self.session_token,
+                'expiration': self.expiration,
+                'request_id': self.request_id}
+
+    def save(self, file_path):
+        """
+        Persist a Session Token to a file in JSON format.
+
+        :type file_path: str
+        :param file_path: The fully qualified path to the file where the
+            Session Token data should be written. Any previous
+            data in the file will be overwritten. To help protect
+            the credentials contained in the file, the permissions
+            of the file will be set to readable/writable by owner only.
+        """
+        fp = open(file_path, 'w')
+        json.dump(self.to_dict(), fp)
+        fp.close()
+        os.chmod(file_path, 0o600)
+
+    def is_expired(self, time_offset_seconds=0):
+        """
+        Checks to see if the Session Token is expired or not. By default
+        it will check to see if the Session Token is expired as of the
+        moment the method is called. However, you can supply an
+        optional parameter which is the number of seconds of offset
+        into the future for the check. For example, if you supply
+        a value of 5, this method will return True if the Session
+        Token will be expired 5 seconds from this moment.
+
+        :type time_offset_seconds: int
+        :param time_offset_seconds: The number of seconds into the future
+            to test the Session Token for expiration.
+        """
+        now = datetime.datetime.utcnow()
+        if time_offset_seconds:
+            now = now + datetime.timedelta(seconds=time_offset_seconds)
+        ts = boto.utils.parse_ts(self.expiration)
+        delta = ts - now
+        return delta.total_seconds() <= 0
+
+
+class FederationToken(object):
+    """
+    :ivar credentials: A Credentials object containing the credentials.
+    :ivar federated_user_arn: ARN specifying federated user using credentials.
+    :ivar federated_user_id: The ID of the federated user using credentials.
+ :ivar packed_policy_size: A percentage value indicating the size of + the policy in packed form + """ + + def __init__(self, parent=None): + self.parent = parent + self.credentials = None + self.federated_user_arn = None + self.federated_user_id = None + self.packed_policy_size = None + self.request_id = None + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + else: + return None + + def endElement(self, name, value, connection): + if name == 'Arn': + self.federated_user_arn = value + elif name == 'FederatedUserId': + self.federated_user_id = value + elif name == 'PackedPolicySize': + self.packed_policy_size = int(value) + elif name == 'RequestId': + self.request_id = value + else: + pass + + +class AssumedRole(object): + """ + :ivar user: The assumed role user. + :ivar credentials: A Credentials object containing the credentials. + """ + def __init__(self, connection=None, credentials=None, user=None): + self._connection = connection + self.credentials = credentials + self.user = user + + def startElement(self, name, attrs, connection): + if name == 'Credentials': + self.credentials = Credentials() + return self.credentials + elif name == 'AssumedRoleUser': + self.user = User() + return self.user + + def endElement(self, name, value, connection): + pass + + +class User(object): + """ + :ivar arn: The arn of the user assuming the role. + :ivar assume_role_id: The identifier of the assumed role. + """ + def __init__(self, arn=None, assume_role_id=None): + self.arn = arn + self.assume_role_id = assume_role_id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Arn': + self.arn = value + elif name == 'AssumedRoleId': + self.assume_role_id = value + + +class DecodeAuthorizationMessage(object): + """ + :ivar request_id: The request ID. + :ivar decoded_message: The decoded authorization message (may be JSON). + """ + def __init__(self, request_id=None, decoded_message=None): + self.request_id = request_id + self.decoded_message = decoded_message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'requestId': + self.request_id = value + elif name == 'DecodedMessage': + self.decoded_message = value diff --git a/ext/boto/support/__init__.py b/ext/boto/support/__init__.py new file mode 100644 index 0000000000..2470961a7c --- /dev/null +++ b/ext/boto/support/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(): + """ + Get all available regions for the Amazon Support service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.support.layer1 import SupportConnection + return get_regions('support', connection_cls=SupportConnection) + + +def connect_to_region(region_name, **kw_params): + from boto.support.layer1 import SupportConnection + return connect('support', region_name, + connection_cls=SupportConnection, **kw_params) diff --git a/ext/boto/support/exceptions.py b/ext/boto/support/exceptions.py new file mode 100644 index 0000000000..cbc19b3a2d --- /dev/null +++ b/ext/boto/support/exceptions.py @@ -0,0 +1,58 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.exception import JSONResponseError + + +class CaseIdNotFound(JSONResponseError): + pass + + +class CaseCreationLimitExceeded(JSONResponseError): + pass + + +class InternalServerError(JSONResponseError): + pass + + +class AttachmentLimitExceeded(JSONResponseError): + pass + + +class DescribeAttachmentLimitExceeded(JSONResponseError): + pass + + +class AttachmentSetIdNotFound(JSONResponseError): + pass + + +class AttachmentSetExpired(JSONResponseError): + pass + + +class AttachmentIdNotFound(JSONResponseError): + pass + + +class AttachmentSetSizeLimitExceeded(JSONResponseError): + pass diff --git a/ext/boto/support/layer1.py b/ext/boto/support/layer1.py new file mode 100644 index 0000000000..33e83cc472 --- /dev/null +++ b/ext/boto/support/layer1.py @@ -0,0 +1,674 @@ +# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. 
All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.compat import json
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.support import exceptions
+
+
+class SupportConnection(AWSQueryConnection):
+    """
+    AWS Support
+    The AWS Support API reference is intended for programmers who need
+    detailed information about the AWS Support operations and data
+    types. This service enables you to manage your AWS Support cases
+    programmatically. It uses HTTP methods that return results in JSON
+    format.
+
+    The AWS Support service also exposes a set of `Trusted Advisor`_
+    features. You can retrieve a list of checks and their
+    descriptions, get check results, specify checks to refresh, and
+    get the refresh status of checks.
+
+    The following list describes the AWS Support case management
+    operations:
+
+
+    + **Service names, issue categories, and available severity
+      levels.** The DescribeServices and DescribeSeverityLevels
+      operations return AWS service names, service codes, service
+      categories, and problem severity levels. You use these values when
+      you call the CreateCase operation.
+    + **Case creation, case details, and case resolution.** The
+      CreateCase, DescribeCases, DescribeAttachment, and ResolveCase
+      operations create AWS Support cases, retrieve information about
+      cases, and resolve cases.
+    + **Case communication.** The DescribeCommunications,
+      AddCommunicationToCase, and AddAttachmentsToSet operations
+      retrieve and add communications and attachments to AWS Support
+      cases.
+
+
+    The following list describes the operations available from the AWS
+    Support service for Trusted Advisor:
+
+
+    + DescribeTrustedAdvisorChecks returns the list of checks that run
+      against your AWS resources.
+    + Using the `CheckId` for a specific check returned by
+      DescribeTrustedAdvisorChecks, you can call
+      DescribeTrustedAdvisorCheckResult to obtain the results for the
+      check you specified.
+    + DescribeTrustedAdvisorCheckSummaries returns summarized results
+      for one or more Trusted Advisor checks.
+    + RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a
+      specified check.
+    + DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh
+      status of one or more checks.
+
+
+    For authentication of requests, AWS Support uses `Signature
+    Version 4 Signing Process`_.
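+
+    A minimal connection sketch (region and credentials are assumed to
+    come from the usual boto configuration; the services returned
+    depend on the account)::
+
+        import boto.support
+        conn = boto.support.connect_to_region('us-east-1')
+        services = conn.describe_services(language='en')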
+ + See `About the AWS Support API`_ in the AWS Support User Guide for + information about how to use this service to create and manage + your support cases, and how to call Trusted Advisor for results of + checks on your resources. + """ + APIVersion = "2013-04-15" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "support.us-east-1.amazonaws.com" + ServiceName = "Support" + TargetPrefix = "AWSSupport_20130415" + ResponseError = JSONResponseError + + _faults = { + "CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded, + "AttachmentLimitExceeded": exceptions.AttachmentLimitExceeded, + "CaseIdNotFound": exceptions.CaseIdNotFound, + "DescribeAttachmentLimitExceeded": exceptions.DescribeAttachmentLimitExceeded, + "AttachmentSetIdNotFound": exceptions.AttachmentSetIdNotFound, + "InternalServerError": exceptions.InternalServerError, + "AttachmentSetExpired": exceptions.AttachmentSetExpired, + "AttachmentIdNotFound": exceptions.AttachmentIdNotFound, + "AttachmentSetSizeLimitExceeded": exceptions.AttachmentSetSizeLimitExceeded, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs or kwargs['host'] is None: + kwargs['host'] = region.endpoint + + super(SupportConnection, self).__init__(**kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def add_attachments_to_set(self, attachments, attachment_set_id=None): + """ + Adds one or more attachments to an attachment set. If an + `AttachmentSetId` is not specified, a new attachment set is + created, and the ID of the set is returned in the response. If + an `AttachmentSetId` is specified, the attachments are added + to the specified set, if it exists. + + An attachment set is a temporary container for attachments + that are to be added to a case or case communication. The set + is available for one hour after it is created; the + `ExpiryTime` returned in the response indicates when the set + expires. The maximum number of attachments in a set is 3, and + the maximum size of any attachment in the set is 5 MB. + + :type attachment_set_id: string + :param attachment_set_id: The ID of the attachment set. If an + `AttachmentSetId` is not specified, a new attachment set is + created, and the ID of the set is returned in the response. If an + `AttachmentSetId` is specified, the attachments are added to the + specified set, if it exists. + + :type attachments: list + :param attachments: One or more attachments to add to the set. The + limit is 3 attachments per set, and the size limit is 5 MB per + attachment. + + """ + params = {'attachments': attachments, } + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='AddAttachmentsToSet', + body=json.dumps(params)) + + def add_communication_to_case(self, communication_body, case_id=None, + cc_email_addresses=None, + attachment_set_id=None): + """ + Adds additional customer communication to an AWS Support case. + You use the `CaseId` value to identify the case to add + communication to. You can list a set of email addresses to + copy on the communication using the `CcEmailAddresses` value. + The `CommunicationBody` value contains the text of the + communication. + + The response indicates the success or failure of the request. + + This operation implements a subset of the behavior on the AWS + Support `Your Support Cases`_ web form. 
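+
+        A minimal sketch (the case ID below is a placeholder in the
+        format shown under ``case_id``; ``conn`` is assumed to be an
+        existing SupportConnection)::
+
+            conn.add_communication_to_case(
+                communication_body='Problem solved, thank you.',
+                case_id='case-12345678910-2013-c4c1d2bf33c5cf47')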
+ + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + :type communication_body: string + :param communication_body: The body of an email communication to add to + the support case. + + :type cc_email_addresses: list + :param cc_email_addresses: The email addresses in the CC line of an + email to be added to the support case. + + :type attachment_set_id: string + :param attachment_set_id: The ID of a set of one or more attachments + for the communication to add to the case. Create the set by calling + AddAttachmentsToSet + + """ + params = {'communicationBody': communication_body, } + if case_id is not None: + params['caseId'] = case_id + if cc_email_addresses is not None: + params['ccEmailAddresses'] = cc_email_addresses + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='AddCommunicationToCase', + body=json.dumps(params)) + + def create_case(self, subject, communication_body, service_code=None, + severity_code=None, category_code=None, + cc_email_addresses=None, language=None, issue_type=None, + attachment_set_id=None): + """ + Creates a new case in the AWS Support Center. This operation + is modeled on the behavior of the AWS Support Center `Open a + new case`_ page. Its parameters require you to specify the + following information: + + + #. **IssueType.** The type of issue for the case. You can + specify either "customer-service" or "technical." If you do + not indicate a value, the default is "technical." + #. **ServiceCode.** The code for an AWS service. You obtain + the `ServiceCode` by calling DescribeServices. + #. **CategoryCode.** The category for the service defined for + the `ServiceCode` value. You also obtain the category code for + a service by calling DescribeServices. Each AWS service + defines its own set of category codes. + #. **SeverityCode.** A value that indicates the urgency of the + case, which in turn determines the response time according to + your service level agreement with AWS Support. You obtain the + SeverityCode by calling DescribeSeverityLevels. + #. **Subject.** The **Subject** field on the AWS Support + Center `Open a new case`_ page. + #. **CommunicationBody.** The **Description** field on the AWS + Support Center `Open a new case`_ page. + #. **AttachmentSetId.** The ID of a set of attachments that + has been created by using AddAttachmentsToSet. + #. **Language.** The human language in which AWS Support + handles the case. English and Japanese are currently + supported. + #. **CcEmailAddresses.** The AWS Support Center **CC** field + on the `Open a new case`_ page. You can list email addresses + to be copied on any correspondence about the case. The account + that opens the case is already identified by passing the AWS + Credentials in the HTTP POST method or in a method or function + call from one of the programming languages supported by an + `AWS SDK`_. + + + A successful CreateCase request returns an AWS Support case + number. Case numbers are used by the DescribeCases operation + to retrieve existing AWS Support cases. + + :type subject: string + :param subject: The title of the AWS Support case. + + :type service_code: string + :param service_code: The code for the AWS service returned by the call + to DescribeServices. 
+ + :type severity_code: string + :param severity_code: The code for the severity level returned by the + call to DescribeSeverityLevels. + + :type category_code: string + :param category_code: The category of problem for the AWS Support case. + + :type communication_body: string + :param communication_body: The communication body text when you create + an AWS Support case by calling CreateCase. + + :type cc_email_addresses: list + :param cc_email_addresses: A list of email addresses that AWS Support + copies on case correspondence. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + :type issue_type: string + :param issue_type: The type of issue for the case. You can specify + either "customer-service" or "technical." If you do not indicate a + value, the default is "technical." + + :type attachment_set_id: string + :param attachment_set_id: The ID of a set of one or more attachments + for the case. Create the set by using AddAttachmentsToSet. + + """ + params = { + 'subject': subject, + 'communicationBody': communication_body, + } + if service_code is not None: + params['serviceCode'] = service_code + if severity_code is not None: + params['severityCode'] = severity_code + if category_code is not None: + params['categoryCode'] = category_code + if cc_email_addresses is not None: + params['ccEmailAddresses'] = cc_email_addresses + if language is not None: + params['language'] = language + if issue_type is not None: + params['issueType'] = issue_type + if attachment_set_id is not None: + params['attachmentSetId'] = attachment_set_id + return self.make_request(action='CreateCase', + body=json.dumps(params)) + + def describe_attachment(self, attachment_id): + """ + Returns the attachment that has the specified ID. Attachment + IDs are generated by the case management system when you add + an attachment to a case or case communication. Attachment IDs + are returned in the AttachmentDetails objects that are + returned by the DescribeCommunications operation. + + :type attachment_id: string + :param attachment_id: The ID of the attachment to return. Attachment + IDs are returned by the DescribeCommunications operation. + + """ + params = {'attachmentId': attachment_id, } + return self.make_request(action='DescribeAttachment', + body=json.dumps(params)) + + def describe_cases(self, case_id_list=None, display_id=None, + after_time=None, before_time=None, + include_resolved_cases=None, next_token=None, + max_results=None, language=None, + include_communications=None): + """ + Returns a list of cases that you specify by passing one or + more case IDs. In addition, you can filter the cases by date + by setting values for the `AfterTime` and `BeforeTime` request + parameters. + + Case data is available for 12 months after creation. If a case + was created more than 12 months ago, a request for data might + cause an error. + + The response returns the following in JSON format: + + + #. One or more CaseDetails data types. + #. One or more `NextToken` values, which specify where to + paginate the returned records represented by the `CaseDetails` + objects. + + :type case_id_list: list + :param case_id_list: A list of ID numbers of the support cases you want + returned. The maximum number of cases is 100. 
+ + :type display_id: string + :param display_id: The ID displayed for a case in the AWS Support + Center user interface. + + :type after_time: string + :param after_time: The start date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type before_time: string + :param before_time: The end date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type include_resolved_cases: boolean + :param include_resolved_cases: Specifies whether resolved support cases + should be included in the DescribeCases results. The default is + false . + + :type next_token: string + :param next_token: A resumption point for pagination. + + :type max_results: integer + :param max_results: The maximum number of results to return before + paginating. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + :type include_communications: boolean + :param include_communications: Specifies whether communications should + be included in the DescribeCases results. The default is true . + + """ + params = {} + if case_id_list is not None: + params['caseIdList'] = case_id_list + if display_id is not None: + params['displayId'] = display_id + if after_time is not None: + params['afterTime'] = after_time + if before_time is not None: + params['beforeTime'] = before_time + if include_resolved_cases is not None: + params['includeResolvedCases'] = include_resolved_cases + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + if language is not None: + params['language'] = language + if include_communications is not None: + params['includeCommunications'] = include_communications + return self.make_request(action='DescribeCases', + body=json.dumps(params)) + + def describe_communications(self, case_id, before_time=None, + after_time=None, next_token=None, + max_results=None): + """ + Returns communications (and attachments) for one or more + support cases. You can use the `AfterTime` and `BeforeTime` + parameters to filter by date. You can use the `CaseId` + parameter to restrict the results to a particular case. + + Case data is available for 12 months after creation. If a case + was created more than 12 months ago, a request for data might + cause an error. + + You can use the `MaxResults` and `NextToken` parameters to + control the pagination of the result set. Set `MaxResults` to + the number of cases you want displayed on each page, and use + `NextToken` to specify the resumption of pagination. + + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. The case ID is an alphanumeric string formatted as shown in + this example: case- 12345678910-2013-c4c1d2bf33c5cf47 + + :type before_time: string + :param before_time: The end date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type after_time: string + :param after_time: The start date for a filtered date search on support + case communications. Case communications are available for 12 + months after creation. + + :type next_token: string + :param next_token: A resumption point for pagination. 
+ + :type max_results: integer + :param max_results: The maximum number of results to return before + paginating. + + """ + params = {'caseId': case_id, } + if before_time is not None: + params['beforeTime'] = before_time + if after_time is not None: + params['afterTime'] = after_time + if next_token is not None: + params['nextToken'] = next_token + if max_results is not None: + params['maxResults'] = max_results + return self.make_request(action='DescribeCommunications', + body=json.dumps(params)) + + def describe_services(self, service_code_list=None, language=None): + """ + Returns the current list of AWS services and a list of service + categories that applies to each one. You then use service + names and categories in your CreateCase requests. Each AWS + service has its own set of categories. + + The service codes and category codes correspond to the values + that are displayed in the **Service** and **Category** drop- + down lists on the AWS Support Center `Open a new case`_ page. + The values in those fields, however, do not necessarily match + the service codes and categories returned by the + `DescribeServices` request. Always use the service codes and + categories obtained programmatically. This practice ensures + that you always have the most recent set of service and + category codes. + + :type service_code_list: list + :param service_code_list: A JSON-formatted list of service codes + available for AWS services. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {} + if service_code_list is not None: + params['serviceCodeList'] = service_code_list + if language is not None: + params['language'] = language + return self.make_request(action='DescribeServices', + body=json.dumps(params)) + + def describe_severity_levels(self, language=None): + """ + Returns the list of severity levels that you can assign to an + AWS Support case. The severity level for a case is also a + field in the CaseDetails data type included in any CreateCase + request. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {} + if language is not None: + params['language'] = language + return self.make_request(action='DescribeSeverityLevels', + body=json.dumps(params)) + + def describe_trusted_advisor_check_refresh_statuses(self, check_ids): + """ + Returns the refresh status of the Trusted Advisor checks that + have the specified check IDs. Check IDs can be obtained by + calling DescribeTrustedAdvisorChecks. + + :type check_ids: list + :param check_ids: The IDs of the Trusted Advisor checks. + + """ + params = {'checkIds': check_ids, } + return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses', + body=json.dumps(params)) + + def describe_trusted_advisor_check_result(self, check_id, language=None): + """ + Returns the results of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. 
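+
+        A minimal sketch chaining the two calls (``conn`` is assumed to
+        be a SupportConnection; the ``checks``/``id`` response field
+        names follow the AWS Support JSON API and are assumptions
+        here)::
+
+            checks = conn.describe_trusted_advisor_checks('en')
+            check_id = checks['checks'][0]['id']
+            result = conn.describe_trusted_advisor_check_result(check_id)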
+ + The response contains a TrustedAdvisorCheckResult object, + which contains these three objects: + + + + TrustedAdvisorCategorySpecificSummary + + TrustedAdvisorResourceDetail + + TrustedAdvisorResourcesSummary + + + In addition, the response contains these fields: + + + + **Status.** The alert status of the check: "ok" (green), + "warning" (yellow), "error" (red), or "not_available". + + **Timestamp.** The time of the last refresh of the check. + + **CheckId.** The unique identifier for the check. + + :type check_id: string + :param check_id: The unique identifier for the Trusted Advisor check. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {'checkId': check_id, } + if language is not None: + params['language'] = language + return self.make_request(action='DescribeTrustedAdvisorCheckResult', + body=json.dumps(params)) + + def describe_trusted_advisor_check_summaries(self, check_ids): + """ + Returns the summaries of the results of the Trusted Advisor + checks that have the specified check IDs. Check IDs can be + obtained by calling DescribeTrustedAdvisorChecks. + + The response contains an array of TrustedAdvisorCheckSummary + objects. + + :type check_ids: list + :param check_ids: The IDs of the Trusted Advisor checks. + + """ + params = {'checkIds': check_ids, } + return self.make_request(action='DescribeTrustedAdvisorCheckSummaries', + body=json.dumps(params)) + + def describe_trusted_advisor_checks(self, language): + """ + Returns information about all available Trusted Advisor + checks, including name, ID, category, description, and + metadata. You must specify a language code; English ("en") and + Japanese ("ja") are currently supported. The response contains + a TrustedAdvisorCheckDescription for each check. + + :type language: string + :param language: The ISO 639-1 code for the language in which AWS + provides support. AWS Support currently supports English ("en") and + Japanese ("ja"). Language parameters must be passed explicitly for + operations that take them. + + """ + params = {'language': language, } + return self.make_request(action='DescribeTrustedAdvisorChecks', + body=json.dumps(params)) + + def refresh_trusted_advisor_check(self, check_id): + """ + Requests a refresh of the Trusted Advisor check that has the + specified check ID. Check IDs can be obtained by calling + DescribeTrustedAdvisorChecks. + + The response contains a RefreshTrustedAdvisorCheckResult + object, which contains these fields: + + + + **Status.** The refresh status of the check: "none", + "enqueued", "processing", "success", or "abandoned". + + **MillisUntilNextRefreshable.** The amount of time, in + milliseconds, until the check is eligible for refresh. + + **CheckId.** The unique identifier for the check. + + :type check_id: string + :param check_id: The unique identifier for the Trusted Advisor check. + + """ + params = {'checkId': check_id, } + return self.make_request(action='RefreshTrustedAdvisorCheck', + body=json.dumps(params)) + + def resolve_case(self, case_id=None): + """ + Takes a `CaseId` and returns the initial state of the case + along with the state of the case after the call to ResolveCase + completed. + + :type case_id: string + :param case_id: The AWS Support case ID requested or returned in the + call. 
The case ID is an alphanumeric string formatted as shown in
+            this example: case-12345678910-2013-c4c1d2bf33c5cf47
+
+        """
+        params = {}
+        if case_id is not None:
+            params['caseId'] = case_id
+        return self.make_request(action='ResolveCase',
+                                 body=json.dumps(params))
+
+    def make_request(self, action, body):
+        headers = {
+            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+            'Host': self.region.endpoint,
+            'Content-Type': 'application/x-amz-json-1.1',
+            'Content-Length': str(len(body)),
+        }
+        http_request = self.build_base_http_request(
+            method='POST', path='/', auth_path='/', params={},
+            headers=headers, data=body)
+        response = self._mexe(http_request, sender=None,
+                              override_num_retries=10)
+        response_body = response.read().decode('utf-8')
+        boto.log.debug(response_body)
+        if response.status == 200:
+            if response_body:
+                return json.loads(response_body)
+        else:
+            json_body = json.loads(response_body)
+            fault_name = json_body.get('__type', None)
+            exception_class = self._faults.get(fault_name, self.ResponseError)
+            raise exception_class(response.status, response.reason,
+                                  body=json_body)
diff --git a/ext/boto/swf/__init__.py b/ext/boto/swf/__init__.py
new file mode 100644
index 0000000000..a4ce01087c
--- /dev/null
+++ b/ext/boto/swf/__init__.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.ec2.regioninfo import RegionInfo
+from boto.regioninfo import get_regions, load_regions
+from boto.regioninfo import connect
+import boto.swf.layer1
+
+REGION_ENDPOINTS = load_regions().get('swf', {})
+
+
+def regions(**kw_params):
+    """
+    Get all available regions for the Amazon Simple Workflow service.
+
+    :rtype: list
+    :return: A list of :class:`boto.regioninfo.RegionInfo`
+    """
+    return get_regions('swf', connection_cls=boto.swf.layer1.Layer1)
+
+
+def connect_to_region(region_name, **kw_params):
+    return connect('swf', region_name,
+                   connection_cls=boto.swf.layer1.Layer1, **kw_params)
diff --git a/ext/boto/swf/exceptions.py b/ext/boto/swf/exceptions.py
new file mode 100644
index 0000000000..f3ac6aeb73
--- /dev/null
+++ b/ext/boto/swf/exceptions.py
@@ -0,0 +1,44 @@
+"""
+Exceptions that are specific to the swf module.
+
+This module subclasses the base SWF response exception,
+boto.exception.SWFResponseError, for some of the SWF specific faults.
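+
+Example (a sketch; ``layer1`` is assumed to be an existing
+boto.swf.layer1.Layer1 connection, and ``register_domain`` is one of
+its registration calls defined elsewhere in layer1.py)::
+
+    try:
+        layer1.register_domain('example-domain', '7')
+    except SWFDomainAlreadyExistsError:
+        pass  # the domain was registered earlier; nothing to do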
+""" +from boto.exception import SWFResponseError + + +class SWFDomainAlreadyExistsError(SWFResponseError): + """ + Raised when when the domain already exists. + """ + pass + + +class SWFLimitExceededError(SWFResponseError): + """ + Raised when when a system imposed limitation has been reached. + """ + pass + + +class SWFOperationNotPermittedError(SWFResponseError): + """ + Raised when (reserved for future use). + """ + + +class SWFTypeAlreadyExistsError(SWFResponseError): + """ + Raised when when the workflow type or activity type already exists. + """ + pass + + +class SWFWorkflowExecutionAlreadyStartedError(SWFResponseError): + """ + Raised when an open execution with the same workflow_id is already running + in the specified domain. + """ + + + diff --git a/ext/boto/swf/layer1.py b/ext/boto/swf/layer1.py new file mode 100644 index 0000000000..0264befe4c --- /dev/null +++ b/ext/boto/swf/layer1.py @@ -0,0 +1,1513 @@ +# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import time + +import boto +from boto.connection import AWSAuthConnection +from boto.provider import Provider +from boto.exception import SWFResponseError +from boto.swf import exceptions as swf_exceptions +from boto.compat import json + +# +# To get full debug output, uncomment the following line and set the +# value of Debug to be 2 +# +#boto.set_stream_logger('swf') +Debug = 0 + + +class Layer1(AWSAuthConnection): + """ + Low-level interface to Simple WorkFlow Service. + """ + + DefaultRegionName = 'us-east-1' + """The default region name for Simple Workflow.""" + + ServiceName = 'com.amazonaws.swf.service.model.SimpleWorkflowService' + """The name of the Service""" + + # In some cases, the fault response __type value is mapped to + # an exception class more specific than SWFResponseError. 
+ _fault_excp = { + 'com.amazonaws.swf.base.model#DomainAlreadyExistsFault': + swf_exceptions.SWFDomainAlreadyExistsError, + 'com.amazonaws.swf.base.model#LimitExceededFault': + swf_exceptions.SWFLimitExceededError, + 'com.amazonaws.swf.base.model#OperationNotPermittedFault': + swf_exceptions.SWFOperationNotPermittedError, + 'com.amazonaws.swf.base.model#TypeAlreadyExistsFault': + swf_exceptions.SWFTypeAlreadyExistsError, + 'com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault': + swf_exceptions.SWFWorkflowExecutionAlreadyStartedError, + } + + ResponseError = SWFResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + debug=0, session_token=None, region=None, profile_name=None): + if not region: + region_name = boto.config.get('SWF', 'region', + self.DefaultRegionName) + for reg in boto.swf.regions(): + if reg.name == region_name: + region = reg + break + + self.region = region + super(Layer1, self).__init__(self.region.endpoint, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, + debug, session_token, profile_name=profile_name) + + def _required_auth_capability(self): + return ['hmac-v4'] + + @classmethod + def _normalize_request_dict(cls, data): + """ + This class method recurses through request data dictionary and removes + any default values. + + :type data: dict + :param data: Specifies request parameters with default values to be removed. + """ + for item in list(data.keys()): + if isinstance(data[item], dict): + cls._normalize_request_dict(data[item]) + if data[item] in (None, {}): + del data[item] + + def json_request(self, action, data, object_hook=None): + """ + This method wraps around make_request() to normalize and serialize the + dictionary with request parameters. + + :type action: string + :param action: Specifies an SWF action. + + :type data: dict + :param data: Specifies request parameters associated with the action. + """ + self._normalize_request_dict(data) + json_input = json.dumps(data) + return self.make_request(action, json_input, object_hook) + + def make_request(self, action, body='', object_hook=None): + """ + :raises: ``SWFResponseError`` if response status is not 200. + """ + headers = {'X-Amz-Target': '%s.%s' % (self.ServiceName, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/json; charset=UTF-8', + 'Content-Encoding': 'amz-1.0', + 'Content-Length': str(len(body))} + http_request = self.build_base_http_request('POST', '/', '/', + {}, headers, body, None) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read().decode('utf-8') + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body, object_hook=object_hook) + else: + return None + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + # Certain faults get mapped to more specific exception classes. + excp_cls = self._fault_excp.get(fault_name, self.ResponseError) + raise excp_cls(response.status, response.reason, body=json_body) + + # Actions related to Activities + + def poll_for_activity_task(self, domain, task_list, identity=None): + """ + Used by workers to get an ActivityTask from the specified + activity taskList. This initiates a long poll, where the + service holds the HTTP connection open and responds as soon as + a task becomes available. 
+        on to the request before responding is 60 seconds. If no task
+        is available within 60 seconds, the poll will return an empty
+        result. An empty result, in this context, means that an
+        ActivityTask is returned, but that the value of taskToken is
+        an empty string. If a task is returned, the worker should use
+        its type to identify and process it correctly.
+
+        :type domain: string
+        :param domain: The name of the domain that contains the task
+            lists being polled.
+
+        :type task_list: string
+        :param task_list: Specifies the task list to poll for activity tasks.
+
+        :type identity: string
+        :param identity: Identity of the worker making the request, which
+            is recorded in the ActivityTaskStarted event in the workflow
+            history. This enables diagnostic tracing when problems arise.
+            The form of this identity is user defined.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('PollForActivityTask', {
+            'domain': domain,
+            'taskList': {'name': task_list},
+            'identity': identity,
+        })
+
+    def respond_activity_task_completed(self, task_token, result=None):
+        """
+        Used by workers to tell the service that the ActivityTask
+        identified by the taskToken completed successfully with a
+        result (if provided).
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type result: string
+        :param result: The result of the activity task. It is a free
+            form string that is implementation specific.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskCompleted', {
+            'taskToken': task_token,
+            'result': result,
+        })
+
+    def respond_activity_task_failed(self, task_token,
+                                     details=None, reason=None):
+        """
+        Used by workers to tell the service that the ActivityTask
+        identified by the taskToken has failed with reason (if
+        specified).
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: Optional detailed information about the failure.
+
+        :type reason: string
+        :param reason: Description of the error that may assist in diagnostics.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskFailed', {
+            'taskToken': task_token,
+            'details': details,
+            'reason': reason,
+        })
+
+    def respond_activity_task_canceled(self, task_token, details=None):
+        """
+        Used by workers to tell the service that the ActivityTask
+        identified by the taskToken was successfully
+        canceled. Additional details can be optionally provided using
+        the details argument.
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: Optional detailed information about the cancellation.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RespondActivityTaskCanceled', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    def record_activity_task_heartbeat(self, task_token, details=None):
+        """
+        Used by activity workers to report to the service that the
+        ActivityTask represented by the specified taskToken is still
+        making progress. The worker can also (optionally) specify
+        details of the progress, for example percent complete, using
+        the details parameter. This action can also be used by the
+        worker as a mechanism to check if cancellation is being
+        requested for the activity task. If a cancellation is being
+        attempted for the specified task, then the boolean
+        cancelRequested flag returned by the service is set to true.
+
+        :type task_token: string
+        :param task_token: The taskToken of the ActivityTask.
+
+        :type details: string
+        :param details: If specified, contains details about the
+            progress of the task.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RecordActivityTaskHeartbeat', {
+            'taskToken': task_token,
+            'details': details,
+        })
+
+    # Actions related to Deciders
+
+    def poll_for_decision_task(self, domain, task_list, identity=None,
+                               maximum_page_size=None,
+                               next_page_token=None,
+                               reverse_order=None):
+        """
+        Used by deciders to get a DecisionTask from the specified
+        decision taskList. A decision task may be returned for any
+        open workflow execution that is using the specified task
+        list. The task includes a paginated view of the history of the
+        workflow execution. The decider should use the workflow type
+        and the history to determine how to properly handle the task.
+
+        :type domain: string
+        :param domain: The name of the domain containing the task
+            lists to poll.
+
+        :type task_list: string
+        :param task_list: Specifies the task list to poll for decision tasks.
+
+        :type identity: string
+        :param identity: Identity of the decider making the request,
+            which is recorded in the DecisionTaskStarted event in the
+            workflow history. This enables diagnostic tracing when
+            problems arise. The form of this identity is user defined.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of history events
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the default.
+            You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being paginated.
+            To get the next page of results, repeat the call with the
+            returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the events in
+            reverse order. By default the results are returned in
+            ascending order of the eventTimestamp of the events.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('PollForDecisionTask', {
+            'domain': domain,
+            'taskList': {'name': task_list},
+            'identity': identity,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def respond_decision_task_completed(self, task_token,
+                                        decisions=None,
+                                        execution_context=None):
+        """
+        Used by deciders to tell the service that the DecisionTask
+        identified by the taskToken has successfully completed.
+        The decisions argument specifies the list of decisions
+        made while processing the task.
+
+        :type task_token: string
+        :param task_token: The taskToken of the DecisionTask.
+
+        :type decisions: list
+        :param decisions: The list of decisions (possibly empty) made by
+            the decider while processing this decision task. See the docs
+            for the Decision structure for details.
+
+        :type execution_context: string
+        :param execution_context: User defined context to add to
+            workflow execution.
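+
+        Example (an illustrative sketch, not part of the original
+        docstring; assumes ``conn`` is a configured :class:`Layer1`
+        instance and ``task`` was returned by
+        :meth:`poll_for_decision_task`)::
+
+            from boto.swf.layer1_decisions import Layer1Decisions
+
+            d = Layer1Decisions()
+            d.complete_workflow_execution(result='done')
+            conn.respond_decision_task_completed(task['taskToken'],
+                                                 decisions=d._data)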
+ + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RespondDecisionTaskCompleted', { + 'taskToken': task_token, + 'decisions': decisions, + 'executionContext': execution_context, + }) + + def request_cancel_workflow_execution(self, domain, workflow_id, + run_id=None): + """ + Records a WorkflowExecutionCancelRequested event in the + currently running workflow execution identified by the given + domain, workflowId, and runId. This logically requests the + cancellation of the workflow execution as a whole. It is up to + the decider to take appropriate actions when it receives an + execution history with this event. + + :type domain: string + :param domain: The name of the domain containing the workflow + execution to cancel. + + :type run_id: string + :param run_id: The runId of the workflow execution to cancel. + + :type workflow_id: string + :param workflow_id: The workflowId of the workflow execution + to cancel. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('RequestCancelWorkflowExecution', { + 'domain': domain, + 'workflowId': workflow_id, + 'runId': run_id, + }) + + def start_workflow_execution(self, domain, workflow_id, + workflow_name, workflow_version, + task_list=None, child_policy=None, + execution_start_to_close_timeout=None, + input=None, tag_list=None, + task_start_to_close_timeout=None): + """ + Starts an execution of the workflow type in the specified + domain using the provided workflowId and input data. + + :type domain: string + :param domain: The name of the domain in which the workflow + execution is created. + + :type workflow_id: string + :param workflow_id: The user defined identifier associated with + the workflow execution. You can use this to associate a + custom identifier with the workflow execution. You may + specify the same identifier if a workflow execution is + logically a restart of a previous execution. You cannot + have two open workflow executions with the same workflowId + at the same time. + + :type workflow_name: string + :param workflow_name: The name of the workflow type. + + :type workflow_version: string + :param workflow_version: The version of the workflow type. + + :type task_list: string + :param task_list: The task list to use for the decision tasks + generated for this workflow execution. This overrides the + defaultTaskList specified when registering the workflow type. + + :type child_policy: string + :param child_policy: If set, specifies the policy to use for the + child workflow executions of this workflow execution if it + is terminated, by calling the TerminateWorkflowExecution + action explicitly or due to an expired timeout. This policy + overrides the default child policy specified when registering + the workflow type using RegisterWorkflowType. The supported + child policies are: + + * TERMINATE: the child executions will be terminated. + * REQUEST_CANCEL: a request to cancel will be attempted + for each child execution by recording a + WorkflowExecutionCancelRequested event in its history. + It is up to the decider to take appropriate actions + when it receives an execution history with this event. + * ABANDON: no action will be taken. The child executions + will continue to run. + + :type execution_start_to_close_timeout: string + :param execution_start_to_close_timeout: The total duration for + this workflow execution. This overrides the + defaultExecutionStartToCloseTimeout specified when + registering the workflow type. 
+
+        :type input: string
+        :param input: The input for the workflow
+            execution. This is a free form string which should be
+            meaningful to the workflow you are starting. This input is
+            made available to the new workflow execution in the
+            WorkflowExecutionStarted history event.
+
+        :type tag_list: list
+        :param tag_list: The list of tags to associate with the
+            workflow execution. You can specify a maximum of 5 tags.
+            You can list workflow executions with a specific tag by
+            calling list_open_workflow_executions or
+            list_closed_workflow_executions and specifying a TagFilter.
+
+        :type task_start_to_close_timeout: string
+        :param task_start_to_close_timeout: Specifies the maximum duration
+            of decision tasks for this workflow execution. This parameter
+            overrides the defaultTaskStartToCloseTimeout specified when
+            registering the workflow type using register_workflow_type.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFWorkflowExecutionAlreadyStartedError, SWFLimitExceededError,
+            SWFOperationNotPermittedError, DefaultUndefinedFault
+        """
+        return self.json_request('StartWorkflowExecution', {
+            'domain': domain,
+            'workflowId': workflow_id,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version},
+            'taskList': {'name': task_list},
+            'childPolicy': child_policy,
+            'executionStartToCloseTimeout': execution_start_to_close_timeout,
+            'input': input,
+            'tagList': tag_list,
+            'taskStartToCloseTimeout': task_start_to_close_timeout,
+
+        })
+
+    def signal_workflow_execution(self, domain, signal_name, workflow_id,
+                                  input=None, run_id=None):
+        """
+        Records a WorkflowExecutionSignaled event in the workflow
+        execution history and creates a decision task for the workflow
+        execution identified by the given domain, workflowId and
+        runId. The event is recorded with the specified user defined
+        signalName and input (if provided).
+
+        :type domain: string
+        :param domain: The name of the domain containing the workflow
+            execution to signal.
+
+        :type signal_name: string
+        :param signal_name: The name of the signal. This name must be
+            meaningful to the target workflow.
+
+        :type workflow_id: string
+        :param workflow_id: The workflowId of the workflow execution
+            to signal.
+
+        :type input: string
+        :param input: Data to attach to the WorkflowExecutionSignaled
+            event in the target workflow execution's history.
+
+        :type run_id: string
+        :param run_id: The runId of the workflow execution to signal.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('SignalWorkflowExecution', {
+            'domain': domain,
+            'signalName': signal_name,
+            'workflowId': workflow_id,
+            'input': input,
+            'runId': run_id,
+        })
+
+    def terminate_workflow_execution(self, domain, workflow_id,
+                                     child_policy=None, details=None,
+                                     reason=None, run_id=None):
+        """
+        Records a WorkflowExecutionTerminated event and forces closure
+        of the workflow execution identified by the given domain,
+        runId, and workflowId. The child policy, registered with the
+        workflow type or specified when starting this execution, is
+        applied to any open child workflow executions of this workflow
+        execution.
+
+        :type domain: string
+        :param domain: The domain of the workflow execution to terminate.
+
+        :type workflow_id: string
+        :param workflow_id: The workflowId of the workflow execution
+            to terminate.
+
+        :type child_policy: string
+        :param child_policy: If set, specifies the policy to use for
+            the child workflow executions of the workflow execution being
+            terminated. This policy overrides the child policy specified
+            for the workflow execution at registration time or when
+            starting the execution. The supported child policies are:
+
+            * TERMINATE: the child executions will be terminated.
+
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its
+              history. It is up to the decider to take appropriate
+              actions when it receives an execution history with this
+              event.
+
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type details: string
+        :param details: Optional details for terminating the
+            workflow execution.
+
+        :type reason: string
+        :param reason: An optional descriptive reason for terminating
+            the workflow execution.
+
+        :type run_id: string
+        :param run_id: The runId of the workflow execution to terminate.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('TerminateWorkflowExecution', {
+            'domain': domain,
+            'workflowId': workflow_id,
+            'childPolicy': child_policy,
+            'details': details,
+            'reason': reason,
+            'runId': run_id,
+        })
+
+# Actions related to Administration
+
+## Activity Management
+
+    def register_activity_type(self, domain, name, version, task_list=None,
+                               default_task_heartbeat_timeout=None,
+                               default_task_schedule_to_close_timeout=None,
+                               default_task_schedule_to_start_timeout=None,
+                               default_task_start_to_close_timeout=None,
+                               description=None):
+        """
+        Registers a new activity type along with its configuration
+        settings in the specified domain.
+
+        :type domain: string
+        :param domain: The name of the domain in which this activity is
+            to be registered.
+
+        :type name: string
+        :param name: The name of the activity type within the domain.
+
+        :type version: string
+        :param version: The version of the activity type.
+
+        :type task_list: string
+        :param task_list: If set, specifies the default task list to
+            use for scheduling tasks of this activity type. This default
+            task list is used if a task list is not provided when a task
+            is scheduled through the schedule_activity_task Decision.
+
+        :type default_task_heartbeat_timeout: string
+        :param default_task_heartbeat_timeout: If set, specifies the
+            default maximum time before which a worker processing a task
+            of this type must report progress by calling
+            RecordActivityTaskHeartbeat. If the timeout is exceeded, the
+            activity task is automatically timed out. This default can be
+            overridden when scheduling an activity task using the
+            ScheduleActivityTask Decision. If the activity worker
+            subsequently attempts to record a heartbeat or returns a
+            result, the activity worker receives an UnknownResource
+            fault. In this case, Amazon SWF no longer considers the
+            activity task to be valid; the activity worker should clean up
+            the activity task.
+
+        :type default_task_schedule_to_close_timeout: string
+        :param default_task_schedule_to_close_timeout: If set,
+            specifies the default maximum duration for a task of this
+            activity type. This default can be overridden when scheduling
+            an activity task using the ScheduleActivityTask Decision.
+
+        :type default_task_schedule_to_start_timeout: string
+        :param default_task_schedule_to_start_timeout: If set,
+            specifies the default maximum duration that a task of this
+            activity type can wait before being assigned to a worker. This
+            default can be overridden when scheduling an activity task
+            using the ScheduleActivityTask Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration that a worker can take to process
+            tasks of this activity type. This default can be overridden
+            when scheduling an activity task using the
+            ScheduleActivityTask Decision.
+
+        :type description: string
+        :param description: A textual description of the activity type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterActivityType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultTaskHeartbeatTimeout': default_task_heartbeat_timeout,
+            'defaultTaskScheduleToCloseTimeout': default_task_schedule_to_close_timeout,
+            'defaultTaskScheduleToStartTimeout': default_task_schedule_to_start_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_activity_type(self, domain, activity_name, activity_version):
+        """
+        Deprecates the specified activity type. After an activity
+        type has been deprecated, you cannot create new tasks of
+        that activity type. Tasks of this type that were scheduled
+        before the type was deprecated will continue to run.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Management
+
+    def register_workflow_type(self, domain, name, version,
+                               task_list=None,
+                               default_child_policy=None,
+                               default_execution_start_to_close_timeout=None,
+                               default_task_start_to_close_timeout=None,
+                               description=None):
+        """
+        Registers a new workflow type and its configuration settings
+        in the specified domain.
+
+        :type domain: string
+        :param domain: The name of the domain in which to register
+            the workflow type.
+
+        :type name: string
+        :param name: The name of the workflow type.
+
+        :type version: string
+        :param version: The version of the workflow type.
+
+        :type task_list: string
+        :param task_list: If set, specifies the default task list to use
+            for scheduling decision tasks for executions of this workflow
+            type. This default is used only if a task list is not provided
+            when starting the execution through the StartWorkflowExecution
+            Action or StartChildWorkflowExecution Decision.
+
+        :type default_child_policy: string
+        :param default_child_policy: If set, specifies the default
+            policy to use for the child workflow executions when a
+            workflow execution of this type is terminated, by calling the
+            TerminateWorkflowExecution action explicitly or due to an
+            expired timeout. This default can be overridden when starting
+            a workflow execution using the StartWorkflowExecution action
+            or the StartChildWorkflowExecution Decision. The supported
+            child policies are:
+
+            * TERMINATE: the child executions will be terminated.
+
+            * REQUEST_CANCEL: a request to cancel will be attempted
+              for each child execution by recording a
+              WorkflowExecutionCancelRequested event in its
+              history. It is up to the decider to take appropriate
+              actions when it receives an execution history with this
+              event.
+
+            * ABANDON: no action will be taken. The child executions
+              will continue to run.
+
+        :type default_execution_start_to_close_timeout: string
+        :param default_execution_start_to_close_timeout: If set,
+            specifies the default maximum duration for executions of this
+            workflow type. You can override this default when starting an
+            execution through the StartWorkflowExecution Action or
+            StartChildWorkflowExecution Decision.
+
+        :type default_task_start_to_close_timeout: string
+        :param default_task_start_to_close_timeout: If set, specifies
+            the default maximum duration of decision tasks for this
+            workflow type. This default can be overridden when starting a
+            workflow execution using the StartWorkflowExecution action or
+            the StartChildWorkflowExecution Decision.
+
+        :type description: string
+        :param description: Textual description of the workflow type.
+
+        :raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
+            UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterWorkflowType', {
+            'domain': domain,
+            'name': name,
+            'version': version,
+            'defaultTaskList': {'name': task_list},
+            'defaultChildPolicy': default_child_policy,
+            'defaultExecutionStartToCloseTimeout': default_execution_start_to_close_timeout,
+            'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
+            'description': description,
+        })
+
+    def deprecate_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Deprecates the specified workflow type. After a workflow type
+        has been deprecated, you cannot create new executions of that
+        type. Executions that were started before the type was
+        deprecated will continue to run. A deprecated workflow type
+        may still be used when calling visibility actions.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            type is registered.
+
+        :type workflow_name: string
+        :param workflow_name: The name of the workflow type.
+
+        :type workflow_version: string
+        :param workflow_version: The version of the workflow type.
+
+        :raises: UnknownResourceFault, TypeDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateWorkflowType', {
+            'domain': domain,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version},
+        })
+
+## Domain Management
+
+    def register_domain(self, name,
+                        workflow_execution_retention_period_in_days,
+                        description=None):
+        """
+        Registers a new domain.
+
+        :type name: string
+        :param name: Name of the domain to register. The name must be unique.
+
+        :type workflow_execution_retention_period_in_days: string
+        :param workflow_execution_retention_period_in_days: Specifies
+            the duration *in days* for which the record (including the
+            history) of workflow executions in this domain should be kept
+            by the service. After the retention period, the workflow
+            execution will not be available in the results of visibility
+            calls. If a duration of NONE is specified, the records for
+            workflow executions in this domain are not retained at all.
+
+        :type description: string
+        :param description: Textual description of the domain.
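+
+        Example (an illustrative sketch, not part of the original
+        docstring; assumes ``conn`` is a configured :class:`Layer1`
+        instance)::
+
+            conn.register_domain('my-domain', '30',
+                                 description='domain used by the sketch')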
+
+        :raises: SWFDomainAlreadyExistsError, SWFLimitExceededError,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('RegisterDomain', {
+            'name': name,
+            'workflowExecutionRetentionPeriodInDays': workflow_execution_retention_period_in_days,
+            'description': description,
+        })
+
+    def deprecate_domain(self, name):
+        """
+        Deprecates the specified domain. After a domain has been
+        deprecated it cannot be used to create new workflow executions
+        or register new types. However, you can still use visibility
+        actions on this domain. Deprecating a domain also deprecates
+        all activity and workflow types registered in the
+        domain. Executions that were started before the domain was
+        deprecated will continue to run.
+
+        :type name: string
+        :param name: The name of the domain to deprecate.
+
+        :raises: UnknownResourceFault, DomainDeprecatedFault,
+            SWFOperationNotPermittedError
+        """
+        return self.json_request('DeprecateDomain', {'name': name})
+
+# Visibility Actions
+
+## Activity Visibility
+
+    def list_activity_types(self, domain, registration_status,
+                            name=None,
+                            maximum_page_size=None,
+                            next_page_token=None, reverse_order=None):
+        """
+        Returns information about all activities registered in the
+        specified domain that match the specified name and
+        registration status. The result includes information like
+        creation date, current status of the activity, etc. The
+        results may be split into multiple pages. To retrieve
+        subsequent pages, make the call again using the nextPageToken
+        returned by the initial call.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            types have been registered.
+
+        :type registration_status: string
+        :param registration_status: Specifies the registration status
+            of the activity types to list. Valid values are:
+
+            * REGISTERED
+            * DEPRECATED
+
+        :type name: string
+        :param name: If specified, only lists the activity types that
+            have this name.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results have more than one
+            page. To get the next page of results, repeat the call with
+            the nextPageToken and keep all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the activity
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListActivityTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_activity_type(self, domain, activity_name, activity_version):
+        """
+        Returns information about the specified activity type. This
+        includes configuration settings provided at registration time
+        as well as other general information about the type.
+
+        :type domain: string
+        :param domain: The name of the domain in which the activity
+            type is registered.
+
+        :type activity_name: string
+        :param activity_name: The name of this activity.
+
+        :type activity_version: string
+        :param activity_version: The version of this activity.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeActivityType', {
+            'domain': domain,
+            'activityType': {'name': activity_name,
+                             'version': activity_version}
+        })
+
+## Workflow Visibility
+
+    def list_workflow_types(self, domain, registration_status,
+                            maximum_page_size=None, name=None,
+                            next_page_token=None, reverse_order=None):
+        """
+        Returns information about workflow types in the specified
+        domain. The results may be split into multiple pages that can
+        be retrieved by making the call repeatedly.
+
+        :type domain: string
+        :param domain: The name of the domain in which the workflow
+            types have been registered.
+
+        :type registration_status: string
+        :param registration_status: Specifies the registration status
+            of the workflow types to list. Valid values are:
+
+            * REGISTERED
+            * DEPRECATED
+
+        :type name: string
+        :param name: If specified, lists the workflow type with this name.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: The maximum number of results
+            returned in each page. The default is 100, but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size greater than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If on a previous call to this method a
+            NextPageToken was returned, the results are being
+            paginated. To get the next page of results, repeat the call
+            with the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the workflow
+            types.
+
+        :raises: SWFOperationNotPermittedError, UnknownResourceFault
+        """
+        return self.json_request('ListWorkflowTypes', {
+            'domain': domain,
+            'name': name,
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_workflow_type(self, domain, workflow_name, workflow_version):
+        """
+        Returns information about the specified workflow type. This
+        includes configuration settings specified when the type was
+        registered and other information such as creation date,
+        current status, etc.
+
+        :type domain: string
+        :param domain: The name of the domain in which this workflow
+            type is registered.
+
+        :type workflow_name: string
+        :param workflow_name: The name of the workflow type.
+
+        :type workflow_version: string
+        :param workflow_version: The version of the workflow type.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeWorkflowType', {
+            'domain': domain,
+            'workflowType': {'name': workflow_name,
+                             'version': workflow_version}
+        })
+
+## Workflow Execution Visibility
+
+    def describe_workflow_execution(self, domain, run_id, workflow_id):
+        """
+        Returns information about the specified workflow execution
+        including its type and some statistics.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow execution.
+
+        :type run_id: string
+        :param run_id: A system generated unique identifier for the
+            workflow execution.
+
+        :type workflow_id: string
+        :param workflow_id: The user defined identifier associated
+            with the workflow execution.
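+
+        Example (an illustrative sketch, not part of the original
+        docstring; assumes ``conn`` is a configured :class:`Layer1`
+        instance and both identifiers are known)::
+
+            info = conn.describe_workflow_execution('my-domain', run_id,
+                                                    workflow_id)
+            print(info['executionInfo']['executionStatus'])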
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeWorkflowExecution', {
+            'domain': domain,
+            'execution': {'runId': run_id,
+                          'workflowId': workflow_id},
+        })
+
+    def get_workflow_execution_history(self, domain, run_id, workflow_id,
+                                       maximum_page_size=None,
+                                       next_page_token=None,
+                                       reverse_order=None):
+        """
+        Returns the history of the specified workflow execution. The
+        results may be split into multiple pages. To retrieve
+        subsequent pages, make the call again using the nextPageToken
+        returned by the initial call.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow execution.
+
+        :type run_id: string
+        :param run_id: A system generated unique identifier for the
+            workflow execution.
+
+        :type workflow_id: string
+        :param workflow_id: The user defined identifier associated
+            with the workflow execution.
+
+        :type maximum_page_size: integer
+        :param maximum_page_size: Specifies the maximum number of
+            history events returned in one page. The next page in the
+            result is identified by the NextPageToken returned. By default
+            100 history events are returned in a page but the caller can
+            override this value to a page size smaller than the
+            default. You cannot specify a page size larger than 100.
+
+        :type next_page_token: string
+        :param next_page_token: If a NextPageToken is returned, the
+            result has more than one page. To get the next page, repeat
+            the call and specify the nextPageToken with all other
+            arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the events in
+            reverse order. By default the results are returned in
+            ascending order of the eventTimestamp of the events.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('GetWorkflowExecutionHistory', {
+            'domain': domain,
+            'execution': {'runId': run_id,
+                          'workflowId': workflow_id},
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def count_open_workflow_executions(self, domain, latest_date, oldest_date,
+                                       tag=None,
+                                       workflow_id=None,
+                                       workflow_name=None,
+                                       workflow_version=None):
+        """
+        Returns the number of open workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note:
+            workflow_id, workflow_name/workflow_version and tag are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to count.
+
+        :type latest_date: timestamp
+        :param latest_date: Specifies the latest start or close date
+            and time to return.
+
+        :type oldest_date: timestamp
+        :param oldest_date: Specifies the oldest start or close date
+            and time to return.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are counted.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are counted.
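+
+        Example (an illustrative sketch, not part of the original
+        docstring; assumes ``conn`` is a configured :class:`Layer1`
+        instance; counts executions opened in the last 24 hours)::
+
+            import time
+
+            now = time.time()
+            result = conn.count_open_workflow_executions(
+                'my-domain', latest_date=now, oldest_date=now - 86400)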
+ + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('CountOpenWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': oldest_date, + 'latestDate': latest_date}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'executionFilter': {'workflowId': workflow_id}, + 'tagFilter': {'tag': tag}, + }) + + def list_open_workflow_executions(self, domain, + oldest_date, + latest_date=None, + tag=None, + workflow_id=None, + workflow_name=None, + workflow_version=None, + maximum_page_size=None, + next_page_token=None, + reverse_order=None): + """ + Returns the list of open workflow executions within the + given domain that meet the specified filtering criteria. + + .. note: + workflow_id, workflow_name/workflow_version + and tag are mutually exclusive. You can specify at most + one of these in a request. + + :type domain: string + :param domain: The name of the domain containing the + workflow executions to count. + + :type latest_date: timestamp + :param latest_date: Specifies the latest start or close date + and time to return. + + :type oldest_date: timestamp + :param oldest_date: Specifies the oldest start or close date + and time to return. + + :type tag: string + :param tag: If specified, only executions that have a tag + that matches the filter are counted. + + :type workflow_id: string + :param workflow_id: If specified, only workflow executions + matching the workflow_id are counted. + + :type workflow_name: string + :param workflow_name: Name of the workflow type to filter on. + + :type workflow_version: string + :param workflow_version: Version of the workflow type to filter on. + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the results are being + paginated. To get the next page of results, repeat the call + with the returned token and all other arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the results in + reverse order. By default the results are returned in + descending order of the start or the close time of the + executions. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + + """ + return self.json_request('ListOpenWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': oldest_date, + 'latestDate': latest_date}, + 'tagFilter': {'tag': tag}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'executionFilter': {'workflowId': workflow_id}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + + def count_closed_workflow_executions(self, domain, + start_latest_date=None, + start_oldest_date=None, + close_latest_date=None, + close_oldest_date=None, + close_status=None, + tag=None, + workflow_id=None, + workflow_name=None, + workflow_version=None): + """ + Returns the number of closed workflow executions within the + given domain that meet the specified filtering criteria. + + .. note: + close_status, workflow_id, workflow_name/workflow_version + and tag are mutually exclusive. You can specify at most + one of these in a request. + + .. 
note:
+            start_latest_date/start_oldest_date and
+            close_latest_date/close_oldest_date are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to count.
+
+        :type start_latest_date: timestamp
+        :param start_latest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+
+        :type start_oldest_date: timestamp
+        :param start_oldest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+
+        :type close_latest_date: timestamp
+        :param close_latest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are counted.
+
+        :type close_oldest_date: timestamp
+        :param close_oldest_date: If specified, only workflow executions
+            that meet the close time criteria of the filter are counted.
+
+        :type close_status: string
+        :param close_status: The close status that must match the close status
+            of an execution for it to meet the criteria of this filter.
+            Valid values are:
+
+            * COMPLETED
+            * FAILED
+            * CANCELED
+            * TERMINATED
+            * CONTINUED_AS_NEW
+            * TIMED_OUT
+
+        :type tag: string
+        :param tag: If specified, only executions that have a tag
+            that matches the filter are counted.
+
+        :type workflow_id: string
+        :param workflow_id: If specified, only workflow executions
+            matching the workflow_id are counted.
+
+        :type workflow_name: string
+        :param workflow_name: Name of the workflow type to filter on.
+
+        :type workflow_version: string
+        :param workflow_version: Version of the workflow type to filter on.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountClosedWorkflowExecutions', {
+            'domain': domain,
+            'startTimeFilter': {'oldestDate': start_oldest_date,
+                                'latestDate': start_latest_date},
+            'closeTimeFilter': {'oldestDate': close_oldest_date,
+                                'latestDate': close_latest_date},
+            'closeStatusFilter': {'status': close_status},
+            'tagFilter': {'tag': tag},
+            'typeFilter': {'name': workflow_name,
+                           'version': workflow_version},
+            'executionFilter': {'workflowId': workflow_id}
+        })
+
+    def list_closed_workflow_executions(self, domain,
+                                        start_latest_date=None,
+                                        start_oldest_date=None,
+                                        close_latest_date=None,
+                                        close_oldest_date=None,
+                                        close_status=None,
+                                        tag=None,
+                                        workflow_id=None,
+                                        workflow_name=None,
+                                        workflow_version=None,
+                                        maximum_page_size=None,
+                                        next_page_token=None,
+                                        reverse_order=None):
+        """
+        Returns the list of closed workflow executions within the
+        given domain that meet the specified filtering criteria.
+
+        .. note:
+            close_status, workflow_id, workflow_name/workflow_version
+            and tag are mutually exclusive. You can specify at most
+            one of these in a request.
+
+        .. note:
+            start_latest_date/start_oldest_date and
+            close_latest_date/close_oldest_date are mutually
+            exclusive. You can specify at most one of these in a request.
+
+        :type domain: string
+        :param domain: The name of the domain containing the
+            workflow executions to count.
+
+        :type start_latest_date: timestamp
+        :param start_latest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+
+        :type start_oldest_date: timestamp
+        :param start_oldest_date: If specified, only workflow executions
+            that meet the start time criteria of the filter are counted.
+ + :type close_latest_date: timestamp + :param close_latest_date: If specified, only workflow executions + that meet the close time criteria of the filter are counted. + + :type close_oldest_date: timestamp + :param close_oldest_date: If specified, only workflow executions + that meet the close time criteria of the filter are counted. + + :type close_status: string + :param close_status: The close status that must match the close status + of an execution for it to meet the criteria of this filter. + Valid values are: + + * COMPLETED + * FAILED + * CANCELED + * TERMINATED + * CONTINUED_AS_NEW + * TIMED_OUT + + :type tag: string + :param tag: If specified, only executions that have a tag + that matches the filter are counted. + + :type workflow_id: string + :param workflow_id: If specified, only workflow executions + matching the workflow_id are counted. + + :type workflow_name: string + :param workflow_name: Name of the workflow type to filter on. + + :type workflow_version: string + :param workflow_version: Version of the workflow type to filter on. + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the results are being + paginated. To get the next page of results, repeat the call + with the returned token and all other arguments unchanged. + + :type reverse_order: boolean + :param reverse_order: When set to true, returns the results in + reverse order. By default the results are returned in + descending order of the start or the close time of the + executions. + + :raises: UnknownResourceFault, SWFOperationNotPermittedError + """ + return self.json_request('ListClosedWorkflowExecutions', { + 'domain': domain, + 'startTimeFilter': {'oldestDate': start_oldest_date, + 'latestDate': start_latest_date}, + 'closeTimeFilter': {'oldestDate': close_oldest_date, + 'latestDate': close_latest_date}, + 'executionFilter': {'workflowId': workflow_id}, + 'closeStatusFilter': {'status': close_status}, + 'tagFilter': {'tag': tag}, + 'typeFilter': {'name': workflow_name, + 'version': workflow_version}, + 'maximumPageSize': maximum_page_size, + 'nextPageToken': next_page_token, + 'reverseOrder': reverse_order, + }) + +## Domain Visibility + + def list_domains(self, registration_status, + maximum_page_size=None, + next_page_token=None, reverse_order=None): + """ + Returns the list of domains registered in the account. The + results may be split into multiple pages. To retrieve + subsequent pages, make the call again using the nextPageToken + returned by the initial call. + + :type registration_status: string + :param registration_status: Specifies the registration status + of the domains to list. Valid Values: + + * REGISTERED + * DEPRECATED + + :type maximum_page_size: integer + :param maximum_page_size: The maximum number of results + returned in each page. The default is 100, but the caller can + override this value to a page size smaller than the + default. You cannot specify a page size greater than 100. + + :type next_page_token: string + :param next_page_token: If on a previous call to this method a + NextPageToken was returned, the result has more than one + page. 
To get the next page of results, repeat the call with
+            the returned token and all other arguments unchanged.
+
+        :type reverse_order: boolean
+        :param reverse_order: When set to true, returns the results in
+            reverse order. By default the results are returned in
+            ascending alphabetical order of the name of the domains.
+
+        :raises: SWFOperationNotPermittedError
+        """
+        return self.json_request('ListDomains', {
+            'registrationStatus': registration_status,
+            'maximumPageSize': maximum_page_size,
+            'nextPageToken': next_page_token,
+            'reverseOrder': reverse_order,
+        })
+
+    def describe_domain(self, name):
+        """
+        Returns information about the specified domain including
+        description and status.
+
+        :type name: string
+        :param name: The name of the domain to describe.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('DescribeDomain', {'name': name})
+
+## Task List Visibility
+
+    def count_pending_decision_tasks(self, domain, task_list):
+        """
+        Returns the estimated number of decision tasks in the
+        specified task list. The count returned is an approximation
+        and is not guaranteed to be exact. If you specify a task list
+        that no decision task was ever scheduled in then 0 will be
+        returned.
+
+        :type domain: string
+        :param domain: The name of the domain that contains the task list.
+
+        :type task_list: string
+        :param task_list: The name of the task list.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountPendingDecisionTasks', {
+            'domain': domain,
+            'taskList': {'name': task_list}
+        })
+
+    def count_pending_activity_tasks(self, domain, task_list):
+        """
+        Returns the estimated number of activity tasks in the
+        specified task list. The count returned is an approximation
+        and is not guaranteed to be exact. If you specify a task list
+        that no activity task was ever scheduled in then 0 will be
+        returned.
+
+        :type domain: string
+        :param domain: The name of the domain that contains the task list.
+
+        :type task_list: string
+        :param task_list: The name of the task list.
+
+        :raises: UnknownResourceFault, SWFOperationNotPermittedError
+        """
+        return self.json_request('CountPendingActivityTasks', {
+            'domain': domain,
+            'taskList': {'name': task_list}
+        })
diff --git a/ext/boto/swf/layer1_decisions.py b/ext/boto/swf/layer1_decisions.py
new file mode 100644
index 0000000000..3f5f74af40
--- /dev/null
+++ b/ext/boto/swf/layer1_decisions.py
@@ -0,0 +1,287 @@
+"""
+Helper class for creating decision responses.
+"""
+
+
+class Layer1Decisions(object):
+    """
+    Use this object to build a list of decisions for a decision response.
+    Each method call will append a new decision. Retrieve the list
+    of decisions from the _data attribute.
+
+    """
+    def __init__(self):
+        self._data = []
+
+    def schedule_activity_task(self,
+                               activity_id,
+                               activity_type_name,
+                               activity_type_version,
+                               task_list=None,
+                               control=None,
+                               heartbeat_timeout=None,
+                               schedule_to_close_timeout=None,
+                               schedule_to_start_timeout=None,
+                               start_to_close_timeout=None,
+                               input=None):
+        """
+        Schedules an activity task.
+
+        :type activity_id: string
+        :param activity_id: The activityId of the activity task
+            being scheduled.
+
+        :type activity_type_name: string
+        :param activity_type_name: The name of the type of the activity
+            being scheduled.
+
+        :type activity_type_version: string
+        :param activity_type_version: The version of the type of the
+            activity being scheduled.
+ + :type task_list: string + :param task_list: If set, specifies the name of the task list in + which to schedule the activity task. If not specified, the + defaultTaskList registered with the activity type will be used. + Note: a task list for this activity task must be specified either + as a default for the activity type or through this field. If + neither this field is set nor a default task list was specified + at registration time then a fault will be returned. + """ + o = {} + o['decisionType'] = 'ScheduleActivityTask' + attrs = o['scheduleActivityTaskDecisionAttributes'] = {} + attrs['activityId'] = activity_id + attrs['activityType'] = { + 'name': activity_type_name, + 'version': activity_type_version, + } + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if control is not None: + attrs['control'] = control + if heartbeat_timeout is not None: + attrs['heartbeatTimeout'] = heartbeat_timeout + if schedule_to_close_timeout is not None: + attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout + if schedule_to_start_timeout is not None: + attrs['scheduleToStartTimeout'] = schedule_to_start_timeout + if start_to_close_timeout is not None: + attrs['startToCloseTimeout'] = start_to_close_timeout + if input is not None: + attrs['input'] = input + self._data.append(o) + + def request_cancel_activity_task(self, activity_id): + """ + Attempts to cancel a previously scheduled activity task. If + the activity task was scheduled but has not been assigned to a + worker, then it will be canceled. If the activity task was + already assigned to a worker, then the worker will be informed + that cancellation has been requested in the response to + RecordActivityTaskHeartbeat. + """ + o = {} + o['decisionType'] = 'RequestCancelActivityTask' + attrs = o['requestCancelActivityTaskDecisionAttributes'] = {} + attrs['activityId'] = activity_id + self._data.append(o) + + def record_marker(self, marker_name, details=None): + """ + Records a MarkerRecorded event in the history. Markers can be + used for adding custom information in the history for instance + to let deciders know that they do not need to look at the + history beyond the marker event. + """ + o = {} + o['decisionType'] = 'RecordMarker' + attrs = o['recordMarkerDecisionAttributes'] = {} + attrs['markerName'] = marker_name + if details is not None: + attrs['details'] = details + self._data.append(o) + + def complete_workflow_execution(self, result=None): + """ + Closes the workflow execution and records a WorkflowExecutionCompleted + event in the history + """ + o = {} + o['decisionType'] = 'CompleteWorkflowExecution' + attrs = o['completeWorkflowExecutionDecisionAttributes'] = {} + if result is not None: + attrs['result'] = result + self._data.append(o) + + def fail_workflow_execution(self, reason=None, details=None): + """ + Closes the workflow execution and records a + WorkflowExecutionFailed event in the history. + """ + o = {} + o['decisionType'] = 'FailWorkflowExecution' + attrs = o['failWorkflowExecutionDecisionAttributes'] = {} + if reason is not None: + attrs['reason'] = reason + if details is not None: + attrs['details'] = details + self._data.append(o) + + def cancel_workflow_executions(self, details=None): + """ + Closes the workflow execution and records a WorkflowExecutionCanceled + event in the history. 
+ """ + o = {} + o['decisionType'] = 'CancelWorkflowExecution' + attrs = o['cancelWorkflowExecutionsDecisionAttributes'] = {} + if details is not None: + attrs['details'] = details + self._data.append(o) + + def continue_as_new_workflow_execution(self, + child_policy=None, + execution_start_to_close_timeout=None, + input=None, + tag_list=None, + task_list=None, + start_to_close_timeout=None, + workflow_type_version=None): + """ + Closes the workflow execution and starts a new workflow execution of + the same type using the same workflow id and a unique run Id. A + WorkflowExecutionContinuedAsNew event is recorded in the history. + """ + o = {} + o['decisionType'] = 'ContinueAsNewWorkflowExecution' + attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {} + if child_policy is not None: + attrs['childPolicy'] = child_policy + if execution_start_to_close_timeout is not None: + attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout + if input is not None: + attrs['input'] = input + if tag_list is not None: + attrs['tagList'] = tag_list + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if start_to_close_timeout is not None: + attrs['taskStartToCloseTimeout'] = start_to_close_timeout + if workflow_type_version is not None: + attrs['workflowTypeVersion'] = workflow_type_version + self._data.append(o) + + def start_timer(self, + start_to_fire_timeout, + timer_id, + control=None): + """ + Starts a timer for this workflow execution and records a TimerStarted + event in the history. This timer will fire after the specified delay + and record a TimerFired event. + """ + o = {} + o['decisionType'] = 'StartTimer' + attrs = o['startTimerDecisionAttributes'] = {} + attrs['startToFireTimeout'] = start_to_fire_timeout + attrs['timerId'] = timer_id + if control is not None: + attrs['control'] = control + self._data.append(o) + + def cancel_timer(self, timer_id): + """ + Cancels a previously started timer and records a TimerCanceled + event in the history. + """ + o = {} + o['decisionType'] = 'CancelTimer' + attrs = o['cancelTimerDecisionAttributes'] = {} + attrs['timerId'] = timer_id + self._data.append(o) + + def signal_external_workflow_execution(self, + workflow_id, + signal_name, + run_id=None, + control=None, + input=None): + """ + Requests a signal to be delivered to the specified external workflow + execution and records a SignalExternalWorkflowExecutionInitiated + event in the history. + """ + o = {} + o['decisionType'] = 'SignalExternalWorkflowExecution' + attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + attrs['signalName'] = signal_name + if run_id is not None: + attrs['runId'] = run_id + if control is not None: + attrs['control'] = control + if input is not None: + attrs['input'] = input + self._data.append(o) + + def request_cancel_external_workflow_execution(self, + workflow_id, + control=None, + run_id=None): + """ + Requests that a request be made to cancel the specified + external workflow execution and records a + RequestCancelExternalWorkflowExecutionInitiated event in the + history. 
+ """ + o = {} + o['decisionType'] = 'RequestCancelExternalWorkflowExecution' + attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowId'] = workflow_id + if control is not None: + attrs['control'] = control + if run_id is not None: + attrs['runId'] = run_id + self._data.append(o) + + def start_child_workflow_execution(self, + workflow_type_name, + workflow_type_version, + workflow_id, + child_policy=None, + control=None, + execution_start_to_close_timeout=None, + input=None, + tag_list=None, + task_list=None, + task_start_to_close_timeout=None): + """ + Requests that a child workflow execution be started and + records a StartChildWorkflowExecutionInitiated event in the + history. The child workflow execution is a separate workflow + execution with its own history. + """ + o = {} + o['decisionType'] = 'StartChildWorkflowExecution' + attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {} + attrs['workflowType'] = { + 'name': workflow_type_name, + 'version': workflow_type_version, + } + attrs['workflowId'] = workflow_id + if child_policy is not None: + attrs['childPolicy'] = child_policy + if control is not None: + attrs['control'] = control + if execution_start_to_close_timeout is not None: + attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout + if input is not None: + attrs['input'] = input + if tag_list is not None: + attrs['tagList'] = tag_list + if task_list is not None: + attrs['taskList'] = {'name': task_list} + if task_start_to_close_timeout is not None: + attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout + self._data.append(o) diff --git a/ext/boto/swf/layer2.py b/ext/boto/swf/layer2.py new file mode 100644 index 0000000000..b829810b33 --- /dev/null +++ b/ext/boto/swf/layer2.py @@ -0,0 +1,347 @@ +"""Object-oriented interface to SWF wrapping boto.swf.layer1.Layer1""" + +import time +from functools import wraps +from boto.swf.layer1 import Layer1 +from boto.swf.layer1_decisions import Layer1Decisions + +DEFAULT_CREDENTIALS = { + 'aws_access_key_id': None, + 'aws_secret_access_key': None +} + +def set_default_credentials(aws_access_key_id, aws_secret_access_key): + """Set default credentials.""" + DEFAULT_CREDENTIALS.update({ + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + }) + +class SWFBase(object): + + name = None + domain = None + aws_access_key_id = None + aws_secret_access_key = None + region = None + + def __init__(self, **kwargs): + # Set default credentials. + for credkey in ('aws_access_key_id', 'aws_secret_access_key'): + if DEFAULT_CREDENTIALS.get(credkey): + setattr(self, credkey, DEFAULT_CREDENTIALS[credkey]) + # Override attributes with keyword args. 
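+        # Any keyword argument (e.g. name, domain, task_list) simply
+        # becomes an attribute on the instance; the subclasses below
+        # rely on this when calling Layer1.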
+        for kwarg in kwargs:
+            setattr(self, kwarg, kwargs[kwarg])
+
+        self._swf = Layer1(self.aws_access_key_id,
+                           self.aws_secret_access_key,
+                           region=self.region)
+
+    def __repr__(self):
+        rep_str = str(self.name)
+        if hasattr(self, 'version'):
+            rep_str += '-' + str(getattr(self, 'version'))
+        return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
+
+class Domain(SWFBase):
+
+    """Simple Workflow Domain."""
+
+    description = None
+    retention = 30
+
+    @wraps(Layer1.describe_domain)
+    def describe(self):
+        """DescribeDomain."""
+        return self._swf.describe_domain(self.name)
+
+    @wraps(Layer1.deprecate_domain)
+    def deprecate(self):
+        """DeprecateDomain."""
+        self._swf.deprecate_domain(self.name)
+
+    @wraps(Layer1.register_domain)
+    def register(self):
+        """RegisterDomain."""
+        self._swf.register_domain(self.name, str(self.retention),
+                                  self.description)
+
+    @wraps(Layer1.list_activity_types)
+    def activities(self, status='REGISTERED', **kwargs):
+        """ListActivityTypes."""
+        act_types = self._swf.list_activity_types(self.name, status, **kwargs)
+        act_objects = []
+        for act_args in act_types['typeInfos']:
+            act_ident = act_args['activityType']
+            del act_args['activityType']
+            act_args.update(act_ident)
+            act_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+                'region': self.region,
+            })
+            act_objects.append(ActivityType(**act_args))
+        return act_objects
+
+    @wraps(Layer1.list_workflow_types)
+    def workflows(self, status='REGISTERED', **kwargs):
+        """ListWorkflowTypes."""
+        wf_types = self._swf.list_workflow_types(self.name, status, **kwargs)
+        wf_objects = []
+        for wf_args in wf_types['typeInfos']:
+            wf_ident = wf_args['workflowType']
+            del wf_args['workflowType']
+            wf_args.update(wf_ident)
+            wf_args.update({
+                'aws_access_key_id': self.aws_access_key_id,
+                'aws_secret_access_key': self.aws_secret_access_key,
+                'domain': self.name,
+                'region': self.region,
+            })
+
+            wf_objects.append(WorkflowType(**wf_args))
+        return wf_objects
+
+    def executions(self, closed=False, **kwargs):
+        """List open or closed workflow executions.
+
+        For a full list of available parameters refer to
+        :py:func:`boto.swf.layer1.Layer1.list_closed_workflow_executions` and
+        :py:func:`boto.swf.layer1.Layer1.list_open_workflow_executions`
+        """
+        if closed:
+            executions = self._swf.list_closed_workflow_executions(self.name,
+                                                                   **kwargs)
+        else:
+            if 'oldest_date' not in kwargs:
+                # Last 24 hours.
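+                # list_open_workflow_executions requires an oldest_date
+                # filter, so default to a 24-hour window.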
+ kwargs['oldest_date'] = time.time() - (3600 * 24) + executions = self._swf.list_open_workflow_executions(self.name, + **kwargs) + exe_objects = [] + for exe_args in executions['executionInfos']: + for nested_key in ('execution', 'workflowType'): + nested_dict = exe_args[nested_key] + del exe_args[nested_key] + exe_args.update(nested_dict) + + exe_args.update({ + 'aws_access_key_id': self.aws_access_key_id, + 'aws_secret_access_key': self.aws_secret_access_key, + 'domain': self.name, + 'region': self.region, + }) + + exe_objects.append(WorkflowExecution(**exe_args)) + return exe_objects + + @wraps(Layer1.count_pending_activity_tasks) + def count_pending_activity_tasks(self, task_list): + """CountPendingActivityTasks.""" + return self._swf.count_pending_activity_tasks(self.name, task_list) + + @wraps(Layer1.count_pending_decision_tasks) + def count_pending_decision_tasks(self, task_list): + """CountPendingDecisionTasks.""" + return self._swf.count_pending_decision_tasks(self.name, task_list) + + +class Actor(SWFBase): + + task_list = None + last_tasktoken = None + domain = None + + def run(self): + """To be overloaded by subclasses.""" + raise NotImplementedError() + +class ActivityWorker(Actor): + + """Base class for SimpleWorkflow activity workers.""" + + @wraps(Layer1.respond_activity_task_canceled) + def cancel(self, task_token=None, details=None): + """RespondActivityTaskCanceled.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_canceled(task_token, details) + + @wraps(Layer1.respond_activity_task_completed) + def complete(self, task_token=None, result=None): + """RespondActivityTaskCompleted.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_completed(task_token, result) + + @wraps(Layer1.respond_activity_task_failed) + def fail(self, task_token=None, details=None, reason=None): + """RespondActivityTaskFailed.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_activity_task_failed(task_token, details, + reason) + + @wraps(Layer1.record_activity_task_heartbeat) + def heartbeat(self, task_token=None, details=None): + """RecordActivityTaskHeartbeat.""" + if task_token is None: + task_token = self.last_tasktoken + return self._swf.record_activity_task_heartbeat(task_token, details) + + @wraps(Layer1.poll_for_activity_task) + def poll(self, **kwargs): + """PollForActivityTask.""" + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + task = self._swf.poll_for_activity_task(self.domain, task_list, + **kwargs) + self.last_tasktoken = task.get('taskToken') + return task + +class Decider(Actor): + + """Base class for SimpleWorkflow deciders.""" + + @wraps(Layer1.respond_decision_task_completed) + def complete(self, task_token=None, decisions=None, **kwargs): + """RespondDecisionTaskCompleted.""" + if isinstance(decisions, Layer1Decisions): + # Extract decision list from a Layer1Decisions instance. 
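+            # (the service expects the raw list of decision dicts, not
+            # the helper object itself).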
+ decisions = decisions._data + if task_token is None: + task_token = self.last_tasktoken + return self._swf.respond_decision_task_completed(task_token, decisions, + **kwargs) + + @wraps(Layer1.poll_for_decision_task) + def poll(self, **kwargs): + """PollForDecisionTask.""" + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + decision_task = self._swf.poll_for_decision_task(self.domain, task_list, + **kwargs) + self.last_tasktoken = decision_task.get('taskToken') + return decision_task + +class WorkflowType(SWFBase): + + """A versioned workflow type.""" + + version = None + task_list = None + child_policy = 'TERMINATE' + + @wraps(Layer1.describe_workflow_type) + def describe(self): + """DescribeWorkflowType.""" + return self._swf.describe_workflow_type(self.domain, self.name, + self.version) + @wraps(Layer1.register_workflow_type) + def register(self, **kwargs): + """RegisterWorkflowType.""" + args = { + 'default_execution_start_to_close_timeout': '3600', + 'default_task_start_to_close_timeout': '300', + 'default_child_policy': 'TERMINATE', + } + args.update(kwargs) + self._swf.register_workflow_type(self.domain, self.name, self.version, + **args) + + @wraps(Layer1.deprecate_workflow_type) + def deprecate(self): + """DeprecateWorkflowType.""" + self._swf.deprecate_workflow_type(self.domain, self.name, self.version) + + @wraps(Layer1.start_workflow_execution) + def start(self, **kwargs): + """StartWorkflowExecution.""" + if 'workflow_id' in kwargs: + workflow_id = kwargs['workflow_id'] + del kwargs['workflow_id'] + else: + workflow_id = '%s-%s-%i' % (self.name, self.version, time.time()) + + for def_attr in ('task_list', 'child_policy'): + kwargs[def_attr] = kwargs.get(def_attr, getattr(self, def_attr)) + run_id = self._swf.start_workflow_execution(self.domain, workflow_id, + self.name, self.version, **kwargs)['runId'] + return WorkflowExecution(name=self.name, version=self.version, + runId=run_id, domain=self.domain, workflowId=workflow_id, + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key) + +class WorkflowExecution(SWFBase): + + """An instance of a workflow.""" + + workflowId = None + runId = None + + @wraps(Layer1.signal_workflow_execution) + def signal(self, signame, **kwargs): + """SignalWorkflowExecution.""" + self._swf.signal_workflow_execution(self.domain, signame, + self.workflowId, **kwargs) + + @wraps(Layer1.terminate_workflow_execution) + def terminate(self, **kwargs): + """TerminateWorkflowExecution (p. 
103).""" + return self._swf.terminate_workflow_execution(self.domain, + self.workflowId, **kwargs) + + @wraps(Layer1.get_workflow_execution_history) + def history(self, **kwargs): + """GetWorkflowExecutionHistory.""" + return self._swf.get_workflow_execution_history(self.domain, self.runId, + self.workflowId, **kwargs)['events'] + + @wraps(Layer1.describe_workflow_execution) + def describe(self): + """DescribeWorkflowExecution.""" + return self._swf.describe_workflow_execution(self.domain, self.runId, + self.workflowId) + + @wraps(Layer1.request_cancel_workflow_execution) + def request_cancel(self): + """RequestCancelWorkflowExecution.""" + return self._swf.request_cancel_workflow_execution(self.domain, + self.workflowId, self.runId) + + +class ActivityType(SWFBase): + + """A versioned activity type.""" + + version = None + + @wraps(Layer1.deprecate_activity_type) + def deprecate(self): + """DeprecateActivityType.""" + return self._swf.deprecate_activity_type(self.domain, self.name, + self.version) + + @wraps(Layer1.describe_activity_type) + def describe(self): + """DescribeActivityType.""" + return self._swf.describe_activity_type(self.domain, self.name, + self.version) + + @wraps(Layer1.register_activity_type) + def register(self, **kwargs): + """RegisterActivityType.""" + args = { + 'default_task_heartbeat_timeout': '600', + 'default_task_schedule_to_close_timeout': '3900', + 'default_task_schedule_to_start_timeout': '300', + 'default_task_start_to_close_timeout': '3600', + } + args.update(kwargs) + self._swf.register_activity_type(self.domain, self.name, self.version, + **args) diff --git a/ext/boto/utils.py b/ext/boto/utils.py new file mode 100644 index 0000000000..39a8cf77aa --- /dev/null +++ b/ext/boto/utils.py @@ -0,0 +1,1091 @@ +# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. +# All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# +# Parts of this code were copied or derived from sample code supplied by AWS. +# The following notice applies to that code. +# +# This software code is made available "AS IS" without warranties of any +# kind. You may copy, display, modify and redistribute the software +# code either by itself or as incorporated into your code; provided that +# you do not remove any proprietary notices. Your use of this software +# code is at your own risk and you waive any claim against Amazon +# Digital Services, Inc. 
or its affiliates with respect to your use of +# this software code. (c) 2006 Amazon Digital Services, Inc. or its +# affiliates. + +""" +Some handy utility functions used by several classes. +""" + +import subprocess +import time +import logging.handlers +import boto +import boto.provider +import tempfile +import random +import smtplib +import datetime +import re +import email.mime.multipart +import email.mime.base +import email.mime.text +import email.utils +import email.encoders +import gzip +import threading +import locale +from boto.compat import six, StringIO, urllib, encodebytes + +from contextlib import contextmanager + +from hashlib import md5, sha512 +_hashfn = sha512 + +from boto.compat import json + +try: + from boto.compat.json import JSONDecodeError +except ImportError: + JSONDecodeError = ValueError + +# List of Query String Arguments of Interest +qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging', + 'partNumber', 'policy', 'requestPayment', 'torrent', + 'versioning', 'versionId', 'versions', 'website', + 'uploads', 'uploadId', 'response-content-type', + 'response-content-language', 'response-expires', + 'response-cache-control', 'response-content-disposition', + 'response-content-encoding', 'delete', 'lifecycle', + 'tagging', 'restore', + # storageClass is a QSA for buckets in Google Cloud Storage. + # (StorageClass is associated to individual keys in S3, but + # having it listed here should cause no problems because + # GET bucket?storageClass is not part of the S3 API.) + 'storageClass', + # websiteConfig is a QSA for buckets in Google Cloud + # Storage. + 'websiteConfig', + # compose is a QSA for objects in Google Cloud Storage. + 'compose'] + + +_first_cap_regex = re.compile('(.)([A-Z][a-z]+)') +_number_cap_regex = re.compile('([a-z])([0-9]+)') +_end_cap_regex = re.compile('([a-z0-9])([A-Z])') + + +def unquote_v(nv): + if len(nv) == 1: + return nv + else: + return (nv[0], urllib.parse.unquote(nv[1])) + + +def canonical_string(method, path, headers, expires=None, + provider=None): + """ + Generates the aws canonical string for the given parameters + """ + if not provider: + provider = boto.provider.get_default() + interesting_headers = {} + for key in headers: + lk = key.lower() + if headers[key] is not None and \ + (lk in ['content-md5', 'content-type', 'date'] or + lk.startswith(provider.header_prefix)): + interesting_headers[lk] = str(headers[key]).strip() + + # these keys get empty strings if they don't exist + if 'content-type' not in interesting_headers: + interesting_headers['content-type'] = '' + if 'content-md5' not in interesting_headers: + interesting_headers['content-md5'] = '' + + # just in case someone used this. it's not necessary in this lib. + if provider.date_header in interesting_headers: + interesting_headers['date'] = '' + + # if you're using expires for query string auth, then it trumps date + # (and provider.date_header) + if expires: + interesting_headers['date'] = str(expires) + + sorted_header_keys = sorted(interesting_headers.keys()) + + buf = "%s\n" % method + for key in sorted_header_keys: + val = interesting_headers[key] + if key.startswith(provider.header_prefix): + buf += "%s:%s\n" % (key, val) + else: + buf += "%s\n" % val + + # don't include anything after the first ? in the resource... 
+ # unless it is one of the QSA of interest, defined above + t = path.split('?') + buf += t[0] + + if len(t) > 1: + qsa = t[1].split('&') + qsa = [a.split('=', 1) for a in qsa] + qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest] + if len(qsa) > 0: + qsa.sort(key=lambda x: x[0]) + qsa = ['='.join(a) for a in qsa] + buf += '?' + buf += '&'.join(qsa) + + return buf + + +def merge_meta(headers, metadata, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + final_headers = headers.copy() + for k in metadata.keys(): + if k.lower() in boto.s3.key.Key.base_user_settable_fields: + final_headers[k] = metadata[k] + else: + final_headers[metadata_prefix + k] = metadata[k] + + return final_headers + + +def get_aws_metadata(headers, provider=None): + if not provider: + provider = boto.provider.get_default() + metadata_prefix = provider.metadata_prefix + metadata = {} + for hkey in headers.keys(): + if hkey.lower().startswith(metadata_prefix): + val = urllib.parse.unquote(headers[hkey]) + if isinstance(val, bytes): + try: + val = val.decode('utf-8') + except UnicodeDecodeError: + # Just leave the value as-is + pass + metadata[hkey[len(metadata_prefix):]] = val + del headers[hkey] + return metadata + + +def retry_url(url, retry_on_404=True, num_retries=10, timeout=None): + """ + Retry a url. This is specifically used for accessing the metadata + service on an instance. Since this address should never be proxied + (for security reasons), we create a ProxyHandler with a NULL + dictionary to override any proxy settings in the environment. + """ + for i in range(0, num_retries): + try: + proxy_handler = urllib.request.ProxyHandler({}) + opener = urllib.request.build_opener(proxy_handler) + req = urllib.request.Request(url) + r = opener.open(req, timeout=timeout) + result = r.read() + + if(not isinstance(result, six.string_types) and + hasattr(result, 'decode')): + result = result.decode('utf-8') + + return result + except urllib.error.HTTPError as e: + code = e.getcode() + if code == 404 and not retry_on_404: + return '' + except Exception as e: + boto.log.exception('Caught exception reading instance data') + # If not on the last iteration of the loop then sleep. 
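+        # Back off exponentially (2 ** i seconds), capped by the
+        # 'max_retry_delay' Boto config value (default 60 seconds).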
+        if i + 1 != num_retries:
+            boto.log.debug('Sleeping before retrying')
+            time.sleep(min(2 ** i,
+                           boto.config.get('Boto', 'max_retry_delay', 60)))
+    boto.log.error('Unable to read instance data, giving up')
+    return ''
+
+
+def _get_instance_metadata(url, num_retries, timeout=None):
+    return LazyLoadMetadata(url, num_retries, timeout)
+
+
+class LazyLoadMetadata(dict):
+    def __init__(self, url, num_retries, timeout=None):
+        self._url = url
+        self._num_retries = num_retries
+        self._leaves = {}
+        self._dicts = []
+        self._timeout = timeout
+        data = boto.utils.retry_url(self._url, num_retries=self._num_retries, timeout=self._timeout)
+        if data:
+            fields = data.split('\n')
+            for field in fields:
+                if field.endswith('/'):
+                    key = field[0:-1]
+                    self._dicts.append(key)
+                else:
+                    p = field.find('=')
+                    if p > 0:
+                        key = field[p + 1:]
+                        resource = field[0:p] + '/openssh-key'
+                    else:
+                        key = resource = field
+                    self._leaves[key] = resource
+                    self[key] = None
+
+    def _materialize(self):
+        for key in self:
+            self[key]
+
+    def __getitem__(self, key):
+        if key not in self:
+            # allow dict to throw the KeyError
+            return super(LazyLoadMetadata, self).__getitem__(key)
+
+        # already loaded
+        val = super(LazyLoadMetadata, self).__getitem__(key)
+        if val is not None:
+            return val
+
+        if key in self._leaves:
+            resource = self._leaves[key]
+            last_exception = None
+
+            for i in range(0, self._num_retries):
+                try:
+                    val = boto.utils.retry_url(
+                        self._url + urllib.parse.quote(resource,
+                                                       safe="/:"),
+                        num_retries=self._num_retries,
+                        timeout=self._timeout)
+                    if val and val[0] == '{':
+                        val = json.loads(val)
+                        break
+                    else:
+                        p = val.find('\n')
+                        if p > 0:
+                            val = val.split('\n')
+                        # Break whether or not the value was split into a
+                        # list; only exceptions should trigger a retry.
+                        break
+
+                except JSONDecodeError as e:
+                    boto.log.debug(
+                        "encountered '%s' exception: %s" % (
+                            e.__class__.__name__, e))
+                    boto.log.debug(
+                        'corrupted JSON data found: %s' % val)
+                    last_exception = e
+
+                except Exception as e:
+                    boto.log.debug("encountered unretryable" +
+                                   " '%s' exception, re-raising" % (
+                                       e.__class__.__name__))
+                    last_exception = e
+                    raise
+
+                boto.log.error("Caught exception reading meta data" +
+                               " for the '%s' try" % (i + 1))
+
+                if i + 1 != self._num_retries:
+                    next_sleep = min(
+                        random.random() * 2 ** i,
+                        boto.config.get('Boto', 'max_retry_delay', 60))
+                    time.sleep(next_sleep)
+            else:
+                boto.log.error('Unable to read meta data, giving up')
+                boto.log.error(
+                    "encountered '%s' exception: %s" % (
+                        last_exception.__class__.__name__, last_exception))
+                raise last_exception
+
+            self[key] = val
+        elif key in self._dicts:
+            self[key] = LazyLoadMetadata(self._url + key + '/',
+                                         self._num_retries)
+
+        return super(LazyLoadMetadata, self).__getitem__(key)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def values(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).values()
+
+    def items(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).items()
+
+    def __str__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__str__()
+
+    def __repr__(self):
+        self._materialize()
+        return super(LazyLoadMetadata, self).__repr__()
+
+
+def _build_instance_metadata_url(url, version, path):
+    """
+    Builds an EC2 metadata URL for fetching information about an instance.
+
+    Example:
+
+        >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data/')
+        'http://169.254.169.254/latest/meta-data/'
+
+    :type url: string
+    :param url: URL to metadata service, e.g. 'http://169.254.169.254'
+
+    :type version: string
+    :param version: Version of the metadata to get, e.g. 'latest'
+
+    :type path: string
+    :param path: Path of the metadata to get, e.g. 'meta-data/'. If a trailing
+        slash is required it must be passed in with the path.
+
+    :return: The full metadata URL
+    """
+    return '%s/%s/%s' % (url, version, path)
+
+
+def get_instance_metadata(version='latest', url='http://169.254.169.254',
+                          data='meta-data/', timeout=None, num_retries=5):
+    """
+    Returns the instance metadata as a nested Python dictionary.
+    Simple values (e.g. local_hostname, hostname, etc.) will be
+    stored as string values. Values such as ancestor-ami-ids will
+    be stored in the dict as a list of string values. More complex
+    fields such as public-keys will be stored as nested dicts.
+
+    If the timeout is specified, the connection to the specified url
+    will time out after the specified number of seconds.
+
+    """
+    try:
+        metadata_url = _build_instance_metadata_url(url, version, data)
+        return _get_instance_metadata(metadata_url, num_retries=num_retries, timeout=timeout)
+    except urllib.error.URLError:
+        boto.log.exception("Exception caught when trying to retrieve "
+                           "instance metadata for: %s", data)
+        return None
+
+
+def get_instance_identity(version='latest', url='http://169.254.169.254',
+                          timeout=None, num_retries=5):
+    """
+    Returns the instance identity as a nested Python dictionary.
+    """
+    iid = {}
+    base_url = _build_instance_metadata_url(url, version,
+                                            'dynamic/instance-identity/')
+    try:
+        data = retry_url(base_url, num_retries=num_retries, timeout=timeout)
+        fields = data.split('\n')
+        for field in fields:
+            val = retry_url(base_url + '/' + field + '/', num_retries=num_retries, timeout=timeout)
+            if val[0] == '{':
+                val = json.loads(val)
+            if field:
+                iid[field] = val
+        return iid
+    except urllib.error.URLError:
+        return None
+
+
+def get_instance_userdata(version='latest', sep=None,
+                          url='http://169.254.169.254', timeout=None, num_retries=5):
+    ud_url = _build_instance_metadata_url(url, version, 'user-data')
+    user_data = retry_url(ud_url, retry_on_404=False, num_retries=num_retries, timeout=timeout)
+    if user_data:
+        if sep:
+            l = user_data.split(sep)
+            user_data = {}
+            for nvpair in l:
+                t = nvpair.split('=')
+                user_data[t[0].strip()] = t[1].strip()
+    return user_data
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
+RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
+LOCALE_LOCK = threading.Lock()
+
+
+@contextmanager
+def setlocale(name):
+    """
+    A context manager to set the locale in a threadsafe manner.
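+
+    A minimal usage sketch (``parse_ts`` below uses the same pattern to
+    parse RFC 1123 dates reliably)::
+
+        with setlocale('C'):
+            datetime.datetime.strptime('Tue, 26 Dec 2017 11:14:28 GMT',
+                                       RFC1123)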
+    """
+    with LOCALE_LOCK:
+        saved = locale.setlocale(locale.LC_ALL)
+
+        try:
+            yield locale.setlocale(locale.LC_ALL, name)
+        finally:
+            locale.setlocale(locale.LC_ALL, saved)
+
+
+def get_ts(ts=None):
+    if not ts:
+        ts = time.gmtime()
+    return time.strftime(ISO8601, ts)
+
+
+def parse_ts(ts):
+    with setlocale('C'):
+        ts = ts.strip()
+        try:
+            dt = datetime.datetime.strptime(ts, ISO8601)
+            return dt
+        except ValueError:
+            try:
+                dt = datetime.datetime.strptime(ts, ISO8601_MS)
+                return dt
+            except ValueError:
+                dt = datetime.datetime.strptime(ts, RFC1123)
+                return dt
+
+
+def find_class(module_name, class_name=None):
+    if class_name:
+        module_name = "%s.%s" % (module_name, class_name)
+    modules = module_name.split('.')
+    c = None
+
+    try:
+        for m in modules[1:]:
+            if c:
+                c = getattr(c, m)
+            else:
+                c = getattr(__import__(".".join(modules[0:-1])), m)
+        return c
+    except:
+        return None
+
+
+def update_dme(username, password, dme_id, ip_address):
+    """
+    Update your Dynamic DNS record with DNSMadeEasy.com
+    """
+    dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
+    dme_url += '?username=%s&password=%s&id=%s&ip=%s'
+    s = urllib.request.urlopen(dme_url % (username, password, dme_id, ip_address))
+    return s.read()
+
+
+def fetch_file(uri, file=None, username=None, password=None):
+    """
+    Fetch a file based on the URI provided.
+    If you do not pass in a file pointer, a tempfile.NamedTemporaryFile
+    is returned; None is returned if the file could not be retrieved.
+    The URI can be either an HTTP url, or "s3://bucket_name/key_name"
+    """
+    boto.log.info('Fetching %s' % uri)
+    if file is None:
+        file = tempfile.NamedTemporaryFile()
+    try:
+        if uri.startswith('s3://'):
+            bucket_name, key_name = uri[len('s3://'):].split('/', 1)
+            c = boto.connect_s3(aws_access_key_id=username,
+                                aws_secret_access_key=password)
+            bucket = c.get_bucket(bucket_name)
+            key = bucket.get_key(key_name)
+            key.get_contents_to_file(file)
+        else:
+            if username and password:
+                passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+                passman.add_password(None, uri, username, password)
+                authhandler = urllib.request.HTTPBasicAuthHandler(passman)
+                opener = urllib.request.build_opener(authhandler)
+                urllib.request.install_opener(opener)
+            s = urllib.request.urlopen(uri)
+            file.write(s.read())
+        file.seek(0)
+    except:
+        # Log the failure and fall through to return None; a bare
+        # `raise` here would make this fallback unreachable.
+        boto.log.exception('Problem Retrieving file: %s' % uri)
+        file = None
+    return file
+
+
+class ShellCommand(object):
+
+    def __init__(self, command, wait=True, fail_fast=False, cwd=None):
+        self.exit_code = 0
+        self.command = command
+        self.log_fp = StringIO()
+        self.wait = wait
+        self.fail_fast = fail_fast
+        self.run(cwd=cwd)
+
+    def run(self, cwd=None):
+        boto.log.info('running:%s' % self.command)
+        self.process = subprocess.Popen(self.command, shell=True,
+                                        stdin=subprocess.PIPE,
+                                        stdout=subprocess.PIPE,
+                                        stderr=subprocess.PIPE,
+                                        cwd=cwd)
+        if(self.wait):
+            while self.process.poll() is None:
+                time.sleep(1)
+            t = self.process.communicate()
+            self.log_fp.write(t[0])
+            self.log_fp.write(t[1])
+        boto.log.info(self.log_fp.getvalue())
+        self.exit_code = self.process.returncode
+
+        if self.fail_fast and self.exit_code != 0:
+            raise Exception("Command " + self.command +
+                            " failed with status " + str(self.exit_code))
+
+        return self.exit_code
+
+    def setReadOnly(self, value):
+        raise AttributeError
+
+    def getStatus(self):
+        return self.exit_code
+
+    status = property(getStatus, setReadOnly, None,
+                      'The exit code for the command')
+
+    def getOutput(self):
+        return self.log_fp.getvalue()
+
+    output = property(getOutput, setReadOnly, None,
+                      'The STDOUT and STDERR output of the command')
+
+
+class AuthSMTPHandler(logging.handlers.SMTPHandler):
+    """
+    This class extends the SMTPHandler in the standard Python logging module
+    to accept a username and password on the constructor and to then use those
+    credentials to authenticate with the SMTP server. To use this, you could
+    add something like this in your boto config file:
+
+    [handler_hand07]
+    class=boto.utils.AuthSMTPHandler
+    level=WARN
+    formatter=form07
+    args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
+    """
+
+    def __init__(self, mailhost, username, password,
+                 fromaddr, toaddrs, subject):
+        """
+        Initialize the handler.
+
+        We have extended the constructor to accept a username/password
+        for SMTP authentication.
+        """
+        super(AuthSMTPHandler, self).__init__(mailhost, fromaddr,
+                                              toaddrs, subject)
+        self.username = username
+        self.password = password
+
+    def emit(self, record):
+        """
+        Emit a record.
+
+        Format the record and send it to the specified addressees.
+        It would be really nice if I could add authorization to this class
+        without having to resort to cut and paste inheritance but, no.
+        """
+        try:
+            port = self.mailport
+            if not port:
+                port = smtplib.SMTP_PORT
+            smtp = smtplib.SMTP(self.mailhost, port)
+            smtp.login(self.username, self.password)
+            msg = self.format(record)
+            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
+                self.fromaddr,
+                ','.join(self.toaddrs),
+                self.getSubject(record),
+                email.utils.formatdate(), msg)
+            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
+            smtp.quit()
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            self.handleError(record)
+
+
+class LRUCache(dict):
+    """A dictionary-like object that stores only a certain number of items, and
+    discards its least recently used item when full.
+
+    >>> cache = LRUCache(3)
+    >>> cache['A'] = 0
+    >>> cache['B'] = 1
+    >>> cache['C'] = 2
+    >>> len(cache)
+    3
+
+    >>> cache['A']
+    0
+
+    Adding new items to the cache does not increase its size. Instead, the least
+    recently used item is dropped:
+
+    >>> cache['D'] = 3
+    >>> len(cache)
+    3
+    >>> 'B' in cache
+    False
+
+    Iterating over the cache returns the keys, starting with the most recently
+    used:
+
+    >>> for key in cache:
+    ...     print key
+    D
+    A
+    C
+
+    This code is based on the LRUCache class from Genshi which is based on
+    `Myghty <http://www.myghty.org>`_'s LRUCache from ``myghtyutils.util``,
+    written by Mike Bayer and released under the MIT license (Genshi uses the
+    BSD License).
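+
+    Re-setting an existing key also marks it as most recently used:
+
+    >>> cache['C'] = 22
+    >>> cache['C']
+    22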
+ """ + + class _Item(object): + def __init__(self, key, value): + self.previous = self.next = None + self.key = key + self.value = value + + def __repr__(self): + return repr(self.value) + + def __init__(self, capacity): + self._dict = dict() + self.capacity = capacity + self.head = None + self.tail = None + + def __contains__(self, key): + return key in self._dict + + def __iter__(self): + cur = self.head + while cur: + yield cur.key + cur = cur.next + + def __len__(self): + return len(self._dict) + + def __getitem__(self, key): + item = self._dict[key] + self._update_item(item) + return item.value + + def __setitem__(self, key, value): + item = self._dict.get(key) + if item is None: + item = self._Item(key, value) + self._dict[key] = item + self._insert_item(item) + else: + item.value = value + self._update_item(item) + self._manage_size() + + def __repr__(self): + return repr(self._dict) + + def _insert_item(self, item): + item.previous = None + item.next = self.head + if self.head is not None: + self.head.previous = item + else: + self.tail = item + self.head = item + self._manage_size() + + def _manage_size(self): + while len(self._dict) > self.capacity: + del self._dict[self.tail.key] + if self.tail != self.head: + self.tail = self.tail.previous + self.tail.next = None + else: + self.head = self.tail = None + + def _update_item(self, item): + if self.head == item: + return + + previous = item.previous + previous.next = item.next + if item.next is not None: + item.next.previous = previous + else: + self.tail = previous + + item.previous = None + item.next = self.head + self.head.previous = self.head = item + + +class Password(object): + """ + Password object that stores itself as hashed. + Hash defaults to SHA512 if available, MD5 otherwise. + """ + hashfunc = _hashfn + + def __init__(self, str=None, hashfunc=None): + """ + Load the string from an initial value, this should be the + raw hashed password. 
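+
+        A short sketch of the round trip (the value is illustrative)::
+
+            pw = Password()
+            pw.set('secret')       # only the hex digest is stored
+            assert pw == 'secret'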
+ """ + self.str = str + if hashfunc: + self.hashfunc = hashfunc + + def set(self, value): + if not isinstance(value, bytes): + value = value.encode('utf-8') + self.str = self.hashfunc(value).hexdigest() + + def __str__(self): + return str(self.str) + + def __eq__(self, other): + if other is None: + return False + if not isinstance(other, bytes): + other = other.encode('utf-8') + return str(self.hashfunc(other).hexdigest()) == str(self.str) + + def __len__(self): + if self.str: + return len(self.str) + else: + return 0 + + +def notify(subject, body=None, html_body=None, to_string=None, + attachments=None, append_instance_id=True): + attachments = attachments or [] + if append_instance_id: + subject = "[%s] %s" % ( + boto.config.get_value("Instance", "instance-id"), subject) + if not to_string: + to_string = boto.config.get_value('Notification', 'smtp_to', None) + if to_string: + try: + from_string = boto.config.get_value('Notification', + 'smtp_from', 'boto') + msg = email.mime.multipart.MIMEMultipart() + msg['From'] = from_string + msg['Reply-To'] = from_string + msg['To'] = to_string + msg['Date'] = email.utils.formatdate(localtime=True) + msg['Subject'] = subject + + if body: + msg.attach(email.mime.text.MIMEText(body)) + + if html_body: + part = email.mime.base.MIMEBase('text', 'html') + part.set_payload(html_body) + email.encoders.encode_base64(part) + msg.attach(part) + + for part in attachments: + msg.attach(part) + + smtp_host = boto.config.get_value('Notification', + 'smtp_host', 'localhost') + + # Alternate port support + if boto.config.get_value("Notification", "smtp_port"): + server = smtplib.SMTP(smtp_host, int( + boto.config.get_value("Notification", "smtp_port"))) + else: + server = smtplib.SMTP(smtp_host) + + # TLS support + if boto.config.getbool("Notification", "smtp_tls"): + server.ehlo() + server.starttls() + server.ehlo() + smtp_user = boto.config.get_value('Notification', 'smtp_user', '') + smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '') + if smtp_user: + server.login(smtp_user, smtp_pass) + server.sendmail(from_string, to_string, msg.as_string()) + server.quit() + except: + boto.log.exception('notify failed') + + +def get_utf8_value(value): + if not six.PY2 and isinstance(value, bytes): + return value + + if not isinstance(value, six.string_types): + value = six.text_type(value) + + if isinstance(value, six.text_type): + value = value.encode('utf-8') + + return value + + +def mklist(value): + if not isinstance(value, list): + if isinstance(value, tuple): + value = list(value) + else: + value = [value] + return value + + +def pythonize_name(name): + """Convert camel case to a "pythonic" name. + + Examples:: + + pythonize_name('CamelCase') -> 'camel_case' + pythonize_name('already_pythonized') -> 'already_pythonized' + pythonize_name('HTTPRequest') -> 'http_request' + pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok' + pythonize_name('UPPER') -> 'upper' + pythonize_name('') -> '' + + """ + s1 = _first_cap_regex.sub(r'\1_\2', name) + s2 = _number_cap_regex.sub(r'\1_\2', s1) + return _end_cap_regex.sub(r'\1_\2', s2).lower() + + +def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'): + """Description: + :param content: A list of tuples of name-content pairs. 
This is used + instead of a dict to ensure that scripts run in order + :type list of tuples: + + :param compress: Use gzip to compress the scripts, defaults to no compression + :type bool: + + :param deftype: The type that should be assumed if nothing else can be figured out + :type str: + + :param delimiter: mime delimiter + :type str: + + :return: Final mime multipart + :rtype: str: + """ + wrapper = email.mime.multipart.MIMEMultipart() + for name, con in content: + definite_type = guess_mime_type(con, deftype) + maintype, subtype = definite_type.split('/', 1) + if maintype == 'text': + mime_con = email.mime.text.MIMEText(con, _subtype=subtype) + else: + mime_con = email.mime.base.MIMEBase(maintype, subtype) + mime_con.set_payload(con) + # Encode the payload using Base64 + email.encoders.encode_base64(mime_con) + mime_con.add_header('Content-Disposition', 'attachment', filename=name) + wrapper.attach(mime_con) + rcontent = wrapper.as_string() + + if compress: + buf = StringIO() + gz = gzip.GzipFile(mode='wb', fileobj=buf) + try: + gz.write(rcontent) + finally: + gz.close() + rcontent = buf.getvalue() + + return rcontent + + +def guess_mime_type(content, deftype): + """Description: Guess the mime type of a block of text + :param content: content we're finding the type of + :type str: + + :param deftype: Default mime type + :type str: + + :rtype: : + :return: + """ + # Mappings recognized by cloudinit + starts_with_mappings = { + '#include': 'text/x-include-url', + '#!': 'text/x-shellscript', + '#cloud-config': 'text/cloud-config', + '#upstart-job': 'text/upstart-job', + '#part-handler': 'text/part-handler', + '#cloud-boothook': 'text/cloud-boothook' + } + rtype = deftype + for possible_type, mimetype in starts_with_mappings.items(): + if content.startswith(possible_type): + rtype = mimetype + break + return(rtype) + + +def compute_md5(fp, buf_size=8192, size=None): + """ + Compute MD5 hash on passed file and return results in a tuple of values. + + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file pointer + will be reset to its current location before the + method returns. + + :type buf_size: integer + :param buf_size: Number of bytes per read request. + + :type size: int + :param size: (optional) The Maximum number of bytes to read from + the file pointer (fp). This is useful when uploading + a file in multiple parts where the file is being + split inplace into different parts. Less bytes may + be available. + + :rtype: tuple + :return: A tuple containing the hex digest version of the MD5 hash + as the first element, the base64 encoded version of the + plain digest as the second element and the data size as + the third element. + """ + return compute_hash(fp, buf_size, size, hash_algorithm=md5) + + +def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5): + hash_obj = hash_algorithm() + spos = fp.tell() + if size and size < buf_size: + s = fp.read(size) + else: + s = fp.read(buf_size) + while s: + if not isinstance(s, bytes): + s = s.encode('utf-8') + hash_obj.update(s) + if size: + size -= len(s) + if size <= 0: + break + if size and size < buf_size: + s = fp.read(size) + else: + s = fp.read(buf_size) + hex_digest = hash_obj.hexdigest() + base64_digest = encodebytes(hash_obj.digest()).decode('utf-8') + if base64_digest[-1] == '\n': + base64_digest = base64_digest[0:-1] + # data_size based on bytes read. 
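+    # Fewer bytes than requested may have been available; the file
+    # pointer is rewound to its original position before returning.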
+ data_size = fp.tell() - spos + fp.seek(spos) + return (hex_digest, base64_digest, data_size) + + +def find_matching_headers(name, headers): + """ + Takes a specific header name and a dict of headers {"name": "value"}. + Returns a list of matching header names, case-insensitive. + + """ + return [h for h in headers if h.lower() == name.lower()] + + +def merge_headers_by_name(name, headers): + """ + Takes a specific header name and a dict of headers {"name": "value"}. + Returns a string of all header values, comma-separated, that match the + input header name, case-insensitive. + + """ + matching_headers = find_matching_headers(name, headers) + return ','.join(str(headers[h]) for h in matching_headers + if headers[h] is not None) + + +class RequestHook(object): + """ + This can be extended and supplied to the connection object + to gain access to request and response object after the request completes. + One use for this would be to implement some specific request logging. + """ + def handle_request_data(self, request, response, error=False): + pass + + +def host_is_ipv6(hostname): + """ + Detect (naively) if the hostname is an IPV6 host. + Return a boolean. + """ + # empty strings or anything that is not a string is automatically not an + # IPV6 address + if not hostname or not isinstance(hostname, str): + return False + + if hostname.startswith('['): + return True + + if len(hostname.split(':')) > 2: + return True + + # Anything else that doesn't start with brackets or doesn't have more than + # one ':' should not be an IPV6 address. This is very naive but the rest of + # the connection chain should error accordingly for typos or ill formed + # addresses + return False + + +def parse_host(hostname): + """ + Given a hostname that may have a port name, ensure that the port is trimmed + returning only the host, including hostnames that are IPV6 and may include + brackets. + """ + # ensure that hostname does not have any whitespaces + hostname = hostname.strip() + + if host_is_ipv6(hostname): + return hostname.split(']:', 1)[0].strip('[]') + else: + return hostname.split(':', 1)[0] diff --git a/ext/boto/vendored/__init__.py b/ext/boto/vendored/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ext/boto/vendored/regions/__init__.py b/ext/boto/vendored/regions/__init__.py new file mode 100644 index 0000000000..1e49c17278 --- /dev/null +++ b/ext/boto/vendored/regions/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +# Since we will be resolving every region, it's worth not cluttering up the +# logs with all that data. +import logging + + +# Leaving the logger enabled would pollute the logs too much for boto, +# so here we disable them by default. 
+_endpoint_logger = logging.getLogger('boto.vendored.regions.regions') +_endpoint_logger.disabled = True diff --git a/ext/boto/vendored/regions/exceptions.py b/ext/boto/vendored/regions/exceptions.py new file mode 100644 index 0000000000..337d18d892 --- /dev/null +++ b/ext/boto/vendored/regions/exceptions.py @@ -0,0 +1,27 @@ +# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + + +class BaseEndpointResolverError(Exception): + """Base error for endpoint resolving errors. + + Should never be raised directly, but clients can catch + this exception if they want to generically handle any errors + during the endpoint resolution process. + + """ + + +class NoRegionError(BaseEndpointResolverError): + """No region was specified.""" + fmt = 'You must specify a region.' diff --git a/ext/boto/vendored/regions/regions.py b/ext/boto/vendored/regions/regions.py new file mode 100644 index 0000000000..f1e3cc8b7e --- /dev/null +++ b/ext/boto/vendored/regions/regions.py @@ -0,0 +1,188 @@ +# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. +"""Resolves regions and endpoints. + +This module implements endpoint resolution, including resolving endpoints for a +given service and region and resolving the available endpoints for a service +in a specific AWS partition. +""" +import logging +import re + +from boto.vendored.regions.exceptions import NoRegionError + +LOG = logging.getLogger(__name__) +DEFAULT_URI_TEMPLATE = '{service}.{region}.{dnsSuffix}' +DEFAULT_SERVICE_DATA = {'endpoints': {}} + + +class BaseEndpointResolver(object): + """Resolves regions and endpoints. Must be subclassed.""" + def construct_endpoint(self, service_name, region_name=None): + """Resolves an endpoint for a service and region combination. + + :type service_name: string + :param service_name: Name of the service to resolve an endpoint for + (e.g., s3) + + :type region_name: string + :param region_name: Region/endpoint name to resolve (e.g., us-east-1) + if no region is provided, the first found partition-wide endpoint + will be used if available. + + :rtype: dict + :return: Returns a dict containing the following keys: + - partition: (string, required) Resolved partition name + - endpointName: (string, required) Resolved endpoint name + - hostname: (string, required) Hostname to use for this endpoint + - sslCommonName: (string) sslCommonName to use for this endpoint. + - credentialScope: (dict) Signature version 4 credential scope + - region: (string) region name override when signing. 
+ - service: (string) service name override when signing. + - signatureVersions: (list) A list of possible signature + versions, including s3, v4, v2, and s3v4 + - protocols: (list) A list of supported protocols + (e.g., http, https) + - ...: Other keys may be included as well based on the metadata + """ + raise NotImplementedError + + def get_available_partitions(self): + """Lists the partitions available to the endpoint resolver. + + :return: Returns a list of partition names (e.g., ["aws", "aws-cn"]). + """ + raise NotImplementedError + + def get_available_endpoints(self, service_name, partition_name='aws', + allow_non_regional=False): + """Lists the endpoint names of a particular partition. + + :type service_name: string + :param service_name: Name of a service to list endpoint for (e.g., s3) + + :type partition_name: string + :param partition_name: Name of the partition to limit endpoints to. + (e.g., aws for the public AWS endpoints, aws-cn for AWS China + endpoints, aws-us-gov for AWS GovCloud (US) Endpoints, etc. + + :type allow_non_regional: bool + :param allow_non_regional: Set to True to include endpoints that are + not regional endpoints (e.g., s3-external-1, + fips-us-gov-west-1, etc). + :return: Returns a list of endpoint names (e.g., ["us-east-1"]). + """ + raise NotImplementedError + + +class EndpointResolver(BaseEndpointResolver): + """Resolves endpoints based on partition endpoint metadata""" + def __init__(self, endpoint_data): + """ + :param endpoint_data: A dict of partition data. + """ + if 'partitions' not in endpoint_data: + raise ValueError('Missing "partitions" in endpoint data') + self._endpoint_data = endpoint_data + + def get_available_partitions(self): + result = [] + for partition in self._endpoint_data['partitions']: + result.append(partition['partition']) + return result + + def get_available_endpoints(self, service_name, partition_name='aws', + allow_non_regional=False): + result = [] + for partition in self._endpoint_data['partitions']: + if partition['partition'] != partition_name: + continue + services = partition['services'] + if service_name not in services: + continue + for endpoint_name in services[service_name]['endpoints']: + if allow_non_regional or endpoint_name in partition['regions']: + result.append(endpoint_name) + return result + + def construct_endpoint(self, service_name, region_name=None): + # Iterate over each partition until a match is found. + for partition in self._endpoint_data['partitions']: + result = self._endpoint_for_partition( + partition, service_name, region_name) + if result: + return result + + def _endpoint_for_partition(self, partition, service_name, region_name): + # Get the service from the partition, or an empty template. + service_data = partition['services'].get( + service_name, DEFAULT_SERVICE_DATA) + # Use the partition endpoint if no region is supplied. + if region_name is None: + if 'partitionEndpoint' in service_data: + region_name = service_data['partitionEndpoint'] + else: + raise NoRegionError() + # Attempt to resolve the exact region for this partition. + if region_name in service_data['endpoints']: + return self._resolve( + partition, service_name, service_data, region_name) + # Check to see if the endpoint provided is valid for the partition. + if self._region_match(partition, region_name): + # Use the partition endpoint if set and not regionalized. 
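+            # (some services, e.g. IAM, expose a single partition-wide
+            # endpoint rather than per-region endpoints).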
+ partition_endpoint = service_data.get('partitionEndpoint') + is_regionalized = service_data.get('isRegionalized', True) + if partition_endpoint and not is_regionalized: + LOG.debug('Using partition endpoint for %s, %s: %s', + service_name, region_name, partition_endpoint) + return self._resolve( + partition, service_name, service_data, partition_endpoint) + LOG.debug('Creating a regex based endpoint for %s, %s', + service_name, region_name) + return self._resolve( + partition, service_name, service_data, region_name) + + def _region_match(self, partition, region_name): + if region_name in partition['regions']: + return True + if 'regionRegex' in partition: + return re.compile(partition['regionRegex']).match(region_name) + return False + + def _resolve(self, partition, service_name, service_data, endpoint_name): + result = service_data['endpoints'].get(endpoint_name, {}) + result['partition'] = partition['partition'] + result['endpointName'] = endpoint_name + # Merge in the service defaults then the partition defaults. + self._merge_keys(service_data.get('defaults', {}), result) + self._merge_keys(partition.get('defaults', {}), result) + hostname = result.get('hostname', DEFAULT_URI_TEMPLATE) + result['hostname'] = self._expand_template( + partition, result['hostname'], service_name, endpoint_name) + if 'sslCommonName' in result: + result['sslCommonName'] = self._expand_template( + partition, result['sslCommonName'], service_name, + endpoint_name) + result['dnsSuffix'] = partition['dnsSuffix'] + return result + + def _merge_keys(self, from_data, result): + for key in from_data: + if key not in result: + result[key] = from_data[key] + + def _expand_template(self, partition, template, service_name, + endpoint_name): + return template.format( + service=service_name, region=endpoint_name, + dnsSuffix=partition['dnsSuffix']) diff --git a/ext/boto/vendored/six.py b/ext/boto/vendored/six.py new file mode 100644 index 0000000000..a104cb8715 --- /dev/null +++ b/ext/boto/vendored/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.9.0" + + +# Useful for very coarse version differentiation. 
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = 
_importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack 
+ del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
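            # (Editor's note: this print_ fallback is only defined when
            # moves.builtins has no print function, i.e. on Python 2, so
            # the Py2-only names ``file`` and ``unicode`` used below are
            # safe to reference here.)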
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. 
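# (Editor's sketch, not part of vendored six: a minimal illustration of
# the add_metaclass decorator defined above. Meta and _Tagged are invented
# names for the example, and the function is never called here.)
def _editor_demo_add_metaclass():
    class Meta(type):
        def __new__(mcs, name, bases, d):
            # Inject an attribute so the effect of the metaclass is visible.
            d["tag"] = name.lower()
            return super(Meta, mcs).__new__(mcs, name, bases, d)

    @add_metaclass(Meta)
    class _Tagged(object):
        pass

    # add_metaclass re-creates _Tagged through Meta, so the injected
    # attribute shows up on the decorated class under Python 2 and 3 alike.
    assert _Tagged.tag == "_tagged"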
+__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/ext/boto/vpc/__init__.py b/ext/boto/vpc/__init__.py new file mode 100644 index 0000000000..46adeeb1ed --- /dev/null +++ b/ext/boto/vpc/__init__.py @@ -0,0 +1,1827 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a connection to the EC2 service. +""" + +from boto.ec2.connection import EC2Connection +from boto.resultset import ResultSet +from boto.vpc.vpc import VPC +from boto.vpc.customergateway import CustomerGateway +from boto.vpc.networkacl import NetworkAcl +from boto.vpc.routetable import RouteTable +from boto.vpc.internetgateway import InternetGateway +from boto.vpc.vpngateway import VpnGateway, Attachment +from boto.vpc.dhcpoptions import DhcpOptions +from boto.vpc.subnet import Subnet +from boto.vpc.vpnconnection import VpnConnection +from boto.vpc.vpc_peering_connection import VpcPeeringConnection +from boto.ec2 import RegionData +from boto.regioninfo import RegionInfo, get_regions +from boto.regioninfo import connect + + +def regions(**kw_params): + """ + Get all available regions for the EC2 service. + You may pass any of the arguments accepted by the VPCConnection + object's constructor as keyword arguments and they will be + passed along to the VPCConnection object. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return get_regions('ec2', connection_cls=VPCConnection) + + +def connect_to_region(region_name, **kw_params): + """ + Given a valid region name, return a + :class:`boto.vpc.VPCConnection`. 
+ Any additional parameters after the region_name are passed on to + the connect method of the region object. + + :type: str + :param region_name: The name of the region to connect to. + + :rtype: :class:`boto.vpc.VPCConnection` or ``None`` + :return: A connection to the given region, or None if an invalid region + name is given + """ + return connect('ec2', region_name, connection_cls=VPCConnection, + **kw_params) + + +class VPCConnection(EC2Connection): + + # VPC methods + + def get_all_vpcs(self, vpc_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your VPCs. You can filter results to + return information only about those VPCs that match your search + parameters. Otherwise, all VPCs associated with your account + are returned. + + :type vpc_ids: list + :param vpc_ids: A list of strings with the desired VPC ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + * *state* - a list of states of the VPC (pending or available) + * *cidrBlock* - a list CIDR blocks of the VPC + * *dhcpOptionsId* - a list of IDs of a set of DHCP options + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.vpc.VPC` + """ + params = {} + if vpc_ids: + self.build_list_params(params, vpc_ids, 'VpcId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpcs', params, [('item', VPC)]) + + def create_vpc(self, cidr_block, instance_tenancy=None, dry_run=False): + """ + Create a new Virtual Private Cloud. + + :type cidr_block: str + :param cidr_block: A valid CIDR block + + :type instance_tenancy: str + :param instance_tenancy: The supported tenancy options for instances + launched into the VPC. Valid values are 'default' and 'dedicated'. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created VPC + :return: A :class:`boto.vpc.vpc.VPC` object + """ + params = {'CidrBlock': cidr_block} + if instance_tenancy: + params['InstanceTenancy'] = instance_tenancy + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateVpc', params, VPC) + + def delete_vpc(self, vpc_id, dry_run=False): + """ + Delete a Virtual Private Cloud. + + :type vpc_id: str + :param vpc_id: The ID of the vpc to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpc', params) + + def modify_vpc_attribute(self, vpc_id, + enable_dns_support=None, + enable_dns_hostnames=None, dry_run=False): + """ + Modifies the specified attribute of the specified VPC. + You can only modify one attribute at a time. + + :type vpc_id: str + :param vpc_id: The ID of the vpc to be deleted. + + :type enable_dns_support: bool + :param enable_dns_support: Specifies whether the DNS server + provided by Amazon is enabled for the VPC. + + :type enable_dns_hostnames: bool + :param enable_dns_hostnames: Specifies whether DNS hostnames are + provided for the instances launched in this VPC. You can only + set this attribute to ``true`` if EnableDnsSupport + is also ``true``. 
+ + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {'VpcId': vpc_id} + if enable_dns_support is not None: + if enable_dns_support: + params['EnableDnsSupport.Value'] = 'true' + else: + params['EnableDnsSupport.Value'] = 'false' + if enable_dns_hostnames is not None: + if enable_dns_hostnames: + params['EnableDnsHostnames.Value'] = 'true' + else: + params['EnableDnsHostnames.Value'] = 'false' + if dry_run: + params['DryRun'] = 'true' + return self.get_status('ModifyVpcAttribute', params) + + # Route Tables + + def get_all_route_tables(self, route_table_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your routing tables. You can filter results + to return information only about those route tables that match your + search parameters. Otherwise, all route tables associated with your + account are returned. + + :type route_table_ids: list + :param route_table_ids: A list of strings with the desired route table + IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.routetable.RouteTable` + """ + params = {} + if route_table_ids: + self.build_list_params(params, route_table_ids, "RouteTableId") + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeRouteTables', params, + [('item', RouteTable)]) + + def associate_route_table(self, route_table_id, subnet_id, dry_run=False): + """ + Associates a route table with a specific subnet. + + :type route_table_id: str + :param route_table_id: The ID of the route table to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: The ID of the association created + """ + params = { + 'RouteTableId': route_table_id, + 'SubnetId': subnet_id + } + if dry_run: + params['DryRun'] = 'true' + result = self.get_object('AssociateRouteTable', params, ResultSet) + return result.associationId + + def disassociate_route_table(self, association_id, dry_run=False): + """ + Removes an association from a route table. This will cause all subnets + that would've used this association to now use the main routing + association instead. + + :type association_id: str + :param association_id: The ID of the association to disassociate. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'AssociationId': association_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisassociateRouteTable', params) + + def create_route_table(self, vpc_id, dry_run=False): + """ + Creates a new route table. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this route table with. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: The newly created route table + :return: A :class:`boto.vpc.routetable.RouteTable` object + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateRouteTable', params, RouteTable) + + def delete_route_table(self, route_table_id, dry_run=False): + """ + Delete a route table. + + :type route_table_id: str + :param route_table_id: The ID of the route table to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'RouteTableId': route_table_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteRouteTable', params) + + def _replace_route_table_association(self, association_id, + route_table_id, dry_run=False): + """ + Helper function for replace_route_table_association and + replace_route_table_association_with_assoc. Should not be used directly. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: ResultSet + :return: ResultSet of Amazon resposne + """ + params = { + 'AssociationId': association_id, + 'RouteTableId': route_table_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_object('ReplaceRouteTableAssociation', params, + ResultSet) + + def replace_route_table_assocation(self, association_id, + route_table_id, dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. + + NOTE: It may be better to use replace_route_table_association_with_assoc + instead of this function; this function does not return the new + association ID. This function is retained for backwards compatibility. + + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).status + + def replace_route_table_association_with_assoc(self, association_id, + route_table_id, + dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. Returns the new association ID. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: str + :return: New association ID + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).newAssociationId + + def create_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, interface_id=None, + vpc_peering_connection_id=None, + dry_run=False): + """ + Creates a new route in the route table within a VPC. The route's target + can be either a gateway attached to the VPC or a NAT instance in the + VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type interface_id: str + :param interface_id: Allows routing to network interface attachments. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: Allows routing to VPC peering + connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id + elif vpc_peering_connection_id is not None: + params['VpcPeeringConnectionId'] = vpc_peering_connection_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('CreateRoute', params) + + def replace_route(self, route_table_id, destination_cidr_block, + gateway_id=None, instance_id=None, interface_id=None, + vpc_peering_connection_id=None, + dry_run=False): + """ + Replaces an existing route within a route table in a VPC. + + :type route_table_id: str + :param route_table_id: The ID of the route table for the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for the + destination match. + + :type gateway_id: str + :param gateway_id: The ID of the gateway attached to your VPC. + + :type instance_id: str + :param instance_id: The ID of a NAT instance in your VPC. + + :type interface_id: str + :param interface_id: Allows routing to network interface attachments. + + :type vpc_peering_connection_id: str + :param vpc_peering_connection_id: Allows routing to VPC peering + connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + + if gateway_id is not None: + params['GatewayId'] = gateway_id + elif instance_id is not None: + params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id + elif vpc_peering_connection_id is not None: + params['VpcPeeringConnectionId'] = vpc_peering_connection_id + if dry_run: + params['DryRun'] = 'true' + + return self.get_status('ReplaceRoute', params) + + def delete_route(self, route_table_id, destination_cidr_block, + dry_run=False): + """ + Deletes a route from a route table within a VPC. 
+ + :type route_table_id: str + :param route_table_id: The ID of the route table with the route. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR address block used for + destination match. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'DestinationCidrBlock': destination_cidr_block + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteRoute', params) + + #Network ACLs + + def get_all_network_acls(self, network_acl_ids=None, filters=None): + """ + Retrieve information about your network acls. You can filter results + to return information only about those network acls that match your + search parameters. Otherwise, all network acls associated with your + account are returned. + + :type network_acl_ids: list + :param network_acl_ids: A list of strings with the desired network ACL + IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :rtype: list + :return: A list of :class:`boto.vpc.networkacl.NetworkAcl` + """ + params = {} + if network_acl_ids: + self.build_list_params(params, network_acl_ids, "NetworkAclId") + if filters: + self.build_filter_params(params, filters) + return self.get_list('DescribeNetworkAcls', params, + [('item', NetworkAcl)]) + + def associate_network_acl(self, network_acl_id, subnet_id): + """ + Associates a network acl with a specific subnet. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :rtype: str + :return: The ID of the association created + """ + + acl = self.get_all_network_acls(filters=[('association.subnet-id', subnet_id)])[0] + association = [ association for association in acl.associations if association.subnet_id == subnet_id ][0] + + params = { + 'AssociationId': association.id, + 'NetworkAclId': network_acl_id + } + + result = self.get_object('ReplaceNetworkAclAssociation', params, ResultSet) + return result.newAssociationId + + def disassociate_network_acl(self, subnet_id, vpc_id=None): + """ + Figures out what the default ACL is for the VPC, and associates + current network ACL with the default. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to which the ACL belongs. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to which the ACL/subnet belongs. Queries EC2 if omitted. + + :rtype: str + :return: The ID of the association created + """ + if not vpc_id: + vpc_id = self.get_all_subnets([subnet_id])[0].vpc_id + acls = self.get_all_network_acls(filters=[('vpc-id', vpc_id), ('default', 'true')]) + default_acl_id = acls[0].id + + return self.associate_network_acl(default_acl_id, subnet_id) + + def create_network_acl(self, vpc_id): + """ + Creates a new network ACL. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this network ACL with. + + :rtype: The newly created network ACL + :return: A :class:`boto.vpc.networkacl.NetworkAcl` object + """ + params = {'VpcId': vpc_id} + return self.get_object('CreateNetworkAcl', params, NetworkAcl) + + def delete_network_acl(self, network_acl_id): + """ + Delete a network ACL + + :type network_acl_id: str + :param network_acl_id: The ID of the network_acl to delete. 
+ + :rtype: bool + :return: True if successful + """ + params = {'NetworkAclId': network_acl_id} + return self.get_status('DeleteNetworkAcl', params) + + def create_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Creates a new network ACL entry in a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL for this network ACL entry. + + :type rule_number: int + :param rule_number: The rule number to assign to the entry (for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). + + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. + + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('CreateNetworkAclEntry', params) + + def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Creates a new network ACL entry in a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL for the id you want to replace + + :type rule_number: int + :param rule_number: The rule number that you want to replace(for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). + + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. 
+ + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('ReplaceNetworkAclEntry', params) + + def delete_network_acl_entry(self, network_acl_id, rule_number, egress=None): + """ + Deletes a network ACL entry from a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL with the network ACL entry. + + :type rule_number: int + :param rule_number: The rule number for the entry to delete. + + :type egress: bool + :param egress: Specifies whether the rule to delete is an egress rule (true) + or ingress rule (false). + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + + return self.get_status('DeleteNetworkAclEntry', params) + + # Internet Gateways + + def get_all_internet_gateways(self, internet_gateway_ids=None, + filters=None, dry_run=False): + """ + Get a list of internet gateways. You can filter results to return information + about only those gateways that you're interested in. + + :type internet_gateway_ids: list + :param internet_gateway_ids: A list of strings with the desired gateway IDs. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + """ + params = {} + + if internet_gateway_ids: + self.build_list_params(params, internet_gateway_ids, + 'InternetGatewayId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeInternetGateways', params, + [('item', InternetGateway)]) + + def create_internet_gateway(self, dry_run=False): + """ + Creates an internet gateway for VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Newly created internet gateway. + :return: `boto.vpc.internetgateway.InternetGateway` + """ + params = {} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateInternetGateway', params, InternetGateway) + + def delete_internet_gateway(self, internet_gateway_id, dry_run=False): + """ + Deletes an internet gateway from the VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to delete. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: Bool + :return: True if successful + """ + params = {'InternetGatewayId': internet_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteInternetGateway', params) + + def attach_internet_gateway(self, internet_gateway_id, vpc_id, + dry_run=False): + """ + Attach an internet gateway to a specific VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to attach. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to attach to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Bool + :return: True if successful + """ + params = { + 'InternetGatewayId': internet_gateway_id, + 'VpcId': vpc_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('AttachInternetGateway', params) + + def detach_internet_gateway(self, internet_gateway_id, vpc_id, + dry_run=False): + """ + Detach an internet gateway from a specific VPC. + + :type internet_gateway_id: str + :param internet_gateway_id: The ID of the internet gateway to detach. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to attach to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: Bool + :return: True if successful + """ + params = { + 'InternetGatewayId': internet_gateway_id, + 'VpcId': vpc_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachInternetGateway', params) + + # Customer Gateways + + def get_all_customer_gateways(self, customer_gateway_ids=None, + filters=None, dry_run=False): + """ + Retrieve information about your CustomerGateways. You can filter + results to return information only about those CustomerGateways that + match your search parameters. Otherwise, all CustomerGateways + associated with your account are returned. + + :type customer_gateway_ids: list + :param customer_gateway_ids: A list of strings with the desired + CustomerGateway ID's. + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the CustomerGateway + (pending,available,deleting,deleted) + - *type*, the type of customer gateway (ipsec.1) + - *ipAddress* the IP address of customer gateway's + internet-routable external inteface + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.customergateway.CustomerGateway` + """ + params = {} + if customer_gateway_ids: + self.build_list_params(params, customer_gateway_ids, + 'CustomerGatewayId') + if filters: + self.build_filter_params(params, filters) + + if dry_run: + params['DryRun'] = 'true' + + return self.get_list('DescribeCustomerGateways', params, + [('item', CustomerGateway)]) + + def create_customer_gateway(self, type, ip_address, bgp_asn, dry_run=False): + """ + Create a new Customer Gateway + + :type type: str + :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1' + + :type ip_address: str + :param ip_address: Internet-routable IP address for customer's gateway. + Must be a static address. + + :type bgp_asn: int + :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP) + Autonomous System Number (ASN) + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: The newly created CustomerGateway + :return: A :class:`boto.vpc.customergateway.CustomerGateway` object + """ + params = {'Type': type, + 'IpAddress': ip_address, + 'BgpAsn': bgp_asn} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateCustomerGateway', params, CustomerGateway) + + def delete_customer_gateway(self, customer_gateway_id, dry_run=False): + """ + Delete a Customer Gateway. + + :type customer_gateway_id: str + :param customer_gateway_id: The ID of the customer_gateway to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'CustomerGatewayId': customer_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteCustomerGateway', params) + + # VPN Gateways + + def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None, + dry_run=False): + """ + Retrieve information about your VpnGateways. You can filter results to + return information only about those VpnGateways that match your search + parameters. Otherwise, all VpnGateways associated with your account + are returned. + + :type vpn_gateway_ids: list + :param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, a list of states of the VpnGateway + (pending,available,deleting,deleted) + - *type*, a list types of customer gateway (ipsec.1) + - *availabilityZone*, a list of Availability zones the + VPN gateway is in. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.customergateway.VpnGateway` + """ + params = {} + if vpn_gateway_ids: + self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeVpnGateways', params, + [('item', VpnGateway)]) + + def create_vpn_gateway(self, type, availability_zone=None, dry_run=False): + """ + Create a new Vpn Gateway + + :type type: str + :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1' + + :type availability_zone: str + :param availability_zone: The Availability Zone where you want the VPN gateway. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created VpnGateway + :return: A :class:`boto.vpc.vpngateway.VpnGateway` object + """ + params = {'Type': type} + if availability_zone: + params['AvailabilityZone'] = availability_zone + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateVpnGateway', params, VpnGateway) + + def delete_vpn_gateway(self, vpn_gateway_id, dry_run=False): + """ + Delete a Vpn Gateway. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnGateway', params) + + def attach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False): + """ + Attaches a VPN gateway to a VPC. 
+ + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to attach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to attach the gateway to. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: An attachment + :return: a :class:`boto.vpc.vpngateway.Attachment` + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_object('AttachVpnGateway', params, Attachment) + + def detach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False): + """ + Detaches a VPN gateway from a VPC. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to detach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to detach the gateway from. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachVpnGateway', params) + + # Subnets + + def get_all_subnets(self, subnet_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your Subnets. You can filter results to + return information only about those Subnets that match your search + parameters. Otherwise, all Subnets associated with your account + are returned. + + :type subnet_ids: list + :param subnet_ids: A list of strings with the desired Subnet ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, a list of states of the Subnet + (pending,available) + - *vpcId*, a list of IDs of the VPC that the subnet is in. + - *cidrBlock*, a list of CIDR blocks of the subnet + - *availabilityZone*, list of the Availability Zones + the subnet is in. + + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.subnet.Subnet` + """ + params = {} + if subnet_ids: + self.build_list_params(params, subnet_ids, 'SubnetId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeSubnets', params, [('item', Subnet)]) + + def create_subnet(self, vpc_id, cidr_block, availability_zone=None, + dry_run=False): + """ + Create a new Subnet + + :type vpc_id: str + :param vpc_id: The ID of the VPC where you want to create the subnet. + + :type cidr_block: str + :param cidr_block: The CIDR block you want the subnet to cover. + + :type availability_zone: str + :param availability_zone: The AZ you want the subnet in + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: The newly created Subnet + :return: A :class:`boto.vpc.customergateway.Subnet` object + """ + params = {'VpcId': vpc_id, + 'CidrBlock': cidr_block} + if availability_zone: + params['AvailabilityZone'] = availability_zone + if dry_run: + params['DryRun'] = 'true' + return self.get_object('CreateSubnet', params, Subnet) + + def delete_subnet(self, subnet_id, dry_run=False): + """ + Delete a subnet. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to be deleted. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+ + :rtype: bool + :return: True if successful + """ + params = {'SubnetId': subnet_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteSubnet', params) + + # DHCP Options + + def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None, dry_run=False): + """ + Retrieve information about your DhcpOptions. + + :type dhcp_options_ids: list + :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's + + :type filters: list of tuples or dict + :param filters: A list of tuples or dict containing filters. Each tuple + or dict item consists of a filter key and a filter value. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: list + :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions` + """ + params = {} + if dhcp_options_ids: + self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') + if filters: + self.build_filter_params(params, filters) + if dry_run: + params['DryRun'] = 'true' + return self.get_list('DescribeDhcpOptions', params, + [('item', DhcpOptions)]) + + def create_dhcp_options(self, domain_name=None, domain_name_servers=None, + ntp_servers=None, netbios_name_servers=None, + netbios_node_type=None, dry_run=False): + """ + Create a new DhcpOption + + This corresponds to + http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-CreateDhcpOptions.html + + :type domain_name: str + :param domain_name: A domain name of your choice (for example, + example.com) + + :type domain_name_servers: list of strings + :param domain_name_servers: The IP address of a domain name server. You + can specify up to four addresses. + + :type ntp_servers: list of strings + :param ntp_servers: The IP address of a Network Time Protocol (NTP) + server. You can specify up to four addresses. + + :type netbios_name_servers: list of strings + :param netbios_name_servers: The IP address of a NetBIOS name server. + You can specify up to four addresses. + + :type netbios_node_type: str + :param netbios_node_type: The NetBIOS node type (1, 2, 4, or 8). For + more information about the values, see RFC 2132. We recommend you + only use 2 at this time (broadcast and multicast are currently not + supported). + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+
+        :rtype: The newly created DhcpOptions
+        :return: A :class:`boto.vpc.dhcpoptions.DhcpOptions` object
+        """
+
+        key_counter = 1
+        params = {}
+
+        def insert_option(params, name, value):
+            params['DhcpConfiguration.%d.Key' % (key_counter,)] = name
+            if isinstance(value, (list, tuple)):
+                for idx, value in enumerate(value, 1):
+                    key_name = 'DhcpConfiguration.%d.Value.%d' % (
+                        key_counter, idx)
+                    params[key_name] = value
+            else:
+                key_name = 'DhcpConfiguration.%d.Value.1' % (key_counter,)
+                params[key_name] = value
+
+            return key_counter + 1
+
+        if domain_name:
+            key_counter = insert_option(params,
+                                        'domain-name', domain_name)
+        if domain_name_servers:
+            key_counter = insert_option(params,
+                                        'domain-name-servers', domain_name_servers)
+        if ntp_servers:
+            key_counter = insert_option(params,
+                                        'ntp-servers', ntp_servers)
+        if netbios_name_servers:
+            key_counter = insert_option(params,
+                                        'netbios-name-servers', netbios_name_servers)
+        if netbios_node_type:
+            key_counter = insert_option(params,
+                                        'netbios-node-type', netbios_node_type)
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_object('CreateDhcpOptions', params, DhcpOptions)
+
+    def delete_dhcp_options(self, dhcp_options_id, dry_run=False):
+        """
+        Delete a set of DHCP Options.
+
+        :type dhcp_options_id: str
+        :param dhcp_options_id: The ID of the DHCP Options to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'DhcpOptionsId': dhcp_options_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteDhcpOptions', params)
+
+    def associate_dhcp_options(self, dhcp_options_id, vpc_id, dry_run=False):
+        """
+        Associate a set of Dhcp Options with a VPC.
+
+        :type dhcp_options_id: str
+        :param dhcp_options_id: The ID of the Dhcp Options
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'DhcpOptionsId': dhcp_options_id,
+                  'VpcId': vpc_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('AssociateDhcpOptions', params)
+
+    # VPN Connection
+
+    def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None,
+                                dry_run=False):
+        """
+        Retrieve information about your VPN connections. You can filter results
+        to return information only about those VPN connections that match your
+        search parameters. Otherwise, all VPN connections associated with your
+        account are returned.
+
+        :type vpn_connection_ids: list
+        :param vpn_connection_ids: A list of strings with the desired VPN connection ID's
+
+        :type filters: list of tuples or dict
+        :param filters: A list of tuples or dict containing filters. Each tuple
+            or dict item consists of a filter key and a filter value.
+            Possible filter keys are:
+
+            - *state*, a list of states of the VPN connection
+              (pending,available,deleting,deleted)
+            - *type*, a list of types of connection, currently 'ipsec.1'
+            - *customerGatewayId*, a list of IDs of the customer gateway
+              associated with the VPN
+            - *vpnGatewayId*, a list of IDs of the VPN gateway associated
+              with the VPN connection
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpnconnection.VpnConnection`
+        """
+        params = {}
+        if vpn_connection_ids:
+            self.build_list_params(params, vpn_connection_ids,
+                                   'VpnConnectionId')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpnConnections', params,
+                             [('item', VpnConnection)])
+
+    def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id,
+                              static_routes_only=None, dry_run=False):
+        """
+        Create a new VPN Connection.
+
+        :type type: str
+        :param type: The type of VPN Connection. Currently only 'ipsec.1'
+            is supported
+
+        :type customer_gateway_id: str
+        :param customer_gateway_id: The ID of the customer gateway.
+
+        :type vpn_gateway_id: str
+        :param vpn_gateway_id: The ID of the VPN gateway.
+
+        :type static_routes_only: bool
+        :param static_routes_only: Indicates whether the VPN connection
+            requires static routes. If you are creating a VPN connection
+            for a device that does not support BGP, you must specify true.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created VpnConnection
+        :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
+        """
+        params = {'Type': type,
+                  'CustomerGatewayId': customer_gateway_id,
+                  'VpnGatewayId': vpn_gateway_id}
+        if static_routes_only is not None:
+            if isinstance(static_routes_only, bool):
+                static_routes_only = str(static_routes_only).lower()
+            params['Options.StaticRoutesOnly'] = static_routes_only
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_object('CreateVpnConnection', params, VpnConnection)
+
+    def delete_vpn_connection(self, vpn_connection_id, dry_run=False):
+        """
+        Delete a VPN Connection.
+
+        :type vpn_connection_id: str
+        :param vpn_connection_id: The ID of the vpn_connection to be deleted.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpnConnectionId': vpn_connection_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVpnConnection', params)
+
+    def disable_vgw_route_propagation(self, route_table_id, gateway_id,
+                                      dry_run=False):
+        """
+        Disables a virtual private gateway (VGW) from propagating routes to the
+        routing tables of an Amazon VPC.
+
+        :type route_table_id: str
+        :param route_table_id: The ID of the routing table.
+
+        :type gateway_id: str
+        :param gateway_id: The ID of the virtual private gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'RouteTableId': route_table_id,
+            'GatewayId': gateway_id,
+        }
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DisableVgwRoutePropagation', params)
+
+    def enable_vgw_route_propagation(self, route_table_id, gateway_id,
+                                     dry_run=False):
+        """
+        Enables a virtual private gateway (VGW) to propagate routes to the
+        routing tables of an Amazon VPC.
+
+        :type route_table_id: str
+        :param route_table_id: The ID of the routing table.
+
+        :type gateway_id: str
+        :param gateway_id: The ID of the virtual private gateway.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+ + :rtype: bool + :return: True if successful + """ + params = { + 'RouteTableId': route_table_id, + 'GatewayId': gateway_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVgwRoutePropagation', params) + + def create_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Creates a new static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('CreateVpnConnectionRoute', params) + + def delete_vpn_connection_route(self, destination_cidr_block, + vpn_connection_id, dry_run=False): + """ + Deletes a static route associated with a VPN connection between an + existing virtual private gateway and a VPN customer gateway. The static + route allows traffic to be routed from the virtual private gateway to + the VPN customer gateway. + + :type destination_cidr_block: str + :param destination_cidr_block: The CIDR block associated with the local + subnet of the customer data center. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the VPN connection. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = { + 'DestinationCidrBlock': destination_cidr_block, + 'VpnConnectionId': vpn_connection_id, + } + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DeleteVpnConnectionRoute', params) + + def get_all_vpc_peering_connections(self, vpc_peering_connection_ids=None, + filters=None, dry_run=False): + """ + Retrieve information about your VPC peering connections. You + can filter results to return information only about those VPC + peering connections that match your search parameters. + Otherwise, all VPC peering connections associated with your + account are returned. + + :type vpc_peering_connection_ids: list + :param vpc_peering_connection_ids: A list of strings with the desired VPC + peering connection ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + * *accepter-vpc-info.cidr-block* - The CIDR block of the peer VPC. + * *accepter-vpc-info.owner-id* - The AWS account ID of the owner + of the peer VPC. + * *accepter-vpc-info.vpc-id* - The ID of the peer VPC. + * *expiration-time* - The expiration date and time for the VPC + peering connection. + * *requester-vpc-info.cidr-block* - The CIDR block of the + requester's VPC. + * *requester-vpc-info.owner-id* - The AWS account ID of the + owner of the requester VPC. + * *requester-vpc-info.vpc-id* - The ID of the requester VPC. + * *status-code* - The status of the VPC peering connection. 
+            * *status-message* - A message that provides more information
+              about the status of the VPC peering connection, if applicable.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection`
+        """
+        params = {}
+        if vpc_peering_connection_ids:
+            self.build_list_params(params, vpc_peering_connection_ids, 'VpcPeeringConnectionId')
+        if filters:
+            self.build_filter_params(params, dict(filters))
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpcPeeringConnections', params, [('item', VpcPeeringConnection)])
+
+    def create_vpc_peering_connection(self, vpc_id, peer_vpc_id,
+                                      peer_owner_id=None, dry_run=False):
+        """
+        Create a new VPC peering connection.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the requester VPC.
+
+        :type peer_vpc_id: str
+        :param peer_vpc_id: The ID of the VPC with which you are creating the peering connection.
+
+        :type peer_owner_id: str
+        :param peer_owner_id: The AWS account ID of the owner of the peer VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: The newly created VpcPeeringConnection
+        :return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object
+        """
+        params = {'VpcId': vpc_id,
+                  'PeerVpcId': peer_vpc_id}
+        if peer_owner_id is not None:
+            params['PeerOwnerId'] = peer_owner_id
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_object('CreateVpcPeeringConnection', params,
+                               VpcPeeringConnection)
+
+    def delete_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
+        """
+        Deletes a VPC peering connection. Either the owner of the requester
+        VPC or the owner of the peer VPC can delete the VPC peering connection
+        if it's in the active state. The owner of the requester VPC can delete
+        a VPC peering connection in the pending-acceptance state.
+
+        :type vpc_peering_connection_id: str
+        :param vpc_peering_connection_id: The ID of the VPC peering connection.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'VpcPeeringConnectionId': vpc_peering_connection_id
+        }
+
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DeleteVpcPeeringConnection', params)
+
+    def reject_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
+        """
+        Rejects a VPC peering connection request. The VPC peering connection
+        must be in the pending-acceptance state.
+
+        :type vpc_peering_connection_id: str
+        :param vpc_peering_connection_id: The ID of the VPC peering connection.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {
+            'VpcPeeringConnectionId': vpc_peering_connection_id
+        }
+
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('RejectVpcPeeringConnection', params)
+
+    def accept_vpc_peering_connection(self, vpc_peering_connection_id, dry_run=False):
+        """
+        Accepts a VPC peering connection request. The VPC peering connection
+        must be in the pending-acceptance state.
+
+        :type vpc_peering_connection_id: str
+        :param vpc_peering_connection_id: The ID of the VPC peering connection.
+
+        :rtype: Accepted VpcPeeringConnection
+        :return: A :class:`boto.vpc.vpc_peering_connection.VpcPeeringConnection` object
+        """
+        params = {
+            'VpcPeeringConnectionId': vpc_peering_connection_id
+        }
+
+        if dry_run:
+            params['DryRun'] = 'true'
+
+        return self.get_object('AcceptVpcPeeringConnection', params,
+                               VpcPeeringConnection)
+
+    def get_all_classic_link_vpcs(self, vpc_ids=None, filters=None,
+                                  dry_run=False):
+        """
+        Describes the ClassicLink status of one or more VPCs.
+
+        :type vpc_ids: list
+        :param vpc_ids: A list of strings with the desired VPC ID's
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :type filters: list of tuples or dict
+        :param filters: A list of tuples or dict containing filters. Each tuple
+            or dict item consists of a filter key and a filter value.
+
+        :rtype: list
+        :return: A list of :class:`boto.vpc.vpc.VPC`
+        """
+        params = {}
+        if vpc_ids:
+            self.build_list_params(params, vpc_ids, 'VpcId')
+        if filters:
+            self.build_filter_params(params, filters)
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_list('DescribeVpcClassicLink', params, [('item', VPC)],
+                             verb='POST')
+
+    def attach_classic_link_vpc(self, vpc_id, instance_id, groups,
+                                dry_run=False):
+        """
+        Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+        or more of the VPC's security groups. You cannot link an EC2-Classic
+        instance to more than one VPC at a time. You can only link an instance
+        that's in the running state. An instance is automatically unlinked from
+        a VPC when it's stopped. You can link it to the VPC again when you
+        restart it.
+
+        After you've linked an instance, you cannot change the VPC security
+        groups that are associated with it. To change the security groups, you
+        must first unlink the instance, and then link it again.
+
+        Linking your instance to a VPC is sometimes referred to as attaching
+        your instance.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of a ClassicLink-enabled VPC.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2-Classic instance to link.
+
+        :type groups: list
+        :param groups: The ID of one or more of the VPC's security groups.
+            You cannot specify security groups from a different VPC. The
+            members of the list can be
+            :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+            strings of the id's of the security groups.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        group_ids = []
+        for group in groups:
+            if hasattr(group, 'id'):
+                group_ids.append(group.id)
+            else:
+                group_ids.append(group)
+        self.build_list_params(params, group_ids, 'SecurityGroupId')
+        return self.get_status('AttachClassicLinkVpc', params)
+
+    def detach_classic_link_vpc(self, vpc_id, instance_id, dry_run=False):
+        """
+        Unlinks a linked EC2-Classic instance from a VPC. After the instance
+        has been unlinked, the VPC security groups are no longer associated
+        with it. An instance is automatically unlinked from a VPC when
+        it's stopped.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC to which the instance is linked.
+
+        :type instance_id: str
+        :param instance_id: The ID of the instance to unlink from the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VpcId': vpc_id, 'InstanceId': instance_id}
+        if dry_run:
+            params['DryRun'] = 'true'
+        return self.get_status('DetachClassicLinkVpc', params)
+
+    def disable_vpc_classic_link(self, vpc_id, dry_run=False):
+        """
+        Disables ClassicLink for a VPC. You cannot disable ClassicLink for a
+        VPC that has EC2-Classic instances linked to it.
+
+        :type vpc_id: str
+        :param vpc_id: The ID of the VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+ + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DisableVpcClassicLink', params) + + def enable_vpc_classic_link(self, vpc_id, dry_run=False): + """ + Enables a VPC for ClassicLink. You can then link EC2-Classic instances + to your ClassicLink-enabled VPC to allow communication over private IP + addresses. You cannot enable your VPC for ClassicLink if any of your + VPC's route tables have existing routes for address ranges within the + 10.0.0.0/8 IP address range, excluding local routes for VPCs in the + 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. + + :type vpc_id: str + :param vpc_id: The ID of the VPC. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('EnableVpcClassicLink', params) diff --git a/ext/boto/vpc/customergateway.py b/ext/boto/vpc/customergateway.py new file mode 100644 index 0000000000..8f19a81a56 --- /dev/null +++ b/ext/boto/vpc/customergateway.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
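The VPCConnection methods above round out the site-to-site VPN surface: customer
gateways, VPN gateways, subnets, and the connection itself. A minimal end-to-end
sketch follows; it assumes boto credentials are configured in the environment,
all IDs and addresses are illustrative placeholders, and create_vpc and
connect_to_region live elsewhere in boto.vpc (they are not part of this hunk).

    import boto.vpc

    conn = boto.vpc.connect_to_region('us-east-1')

    # Network that will be connected to the on-premises site.
    vpc = conn.create_vpc('10.0.0.0/16')
    subnet = conn.create_subnet(vpc.id, '10.0.0.0/24')

    # On-premises side: the public IP and BGP ASN are placeholders.
    cgw = conn.create_customer_gateway('ipsec.1', '203.0.113.10', 65000)

    # AWS side: create a VPN gateway and attach it to the VPC.
    vgw = conn.create_vpn_gateway('ipsec.1')
    conn.attach_vpn_gateway(vgw.id, vpc.id)

    # Tie both ends together; static routing for a non-BGP device.
    vpn = conn.create_vpn_connection('ipsec.1', cgw.id, vgw.id,
                                     static_routes_only=True)
    conn.create_vpn_connection_route('192.168.0.0/24', vpn.id)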
+ +""" +Represents a Customer Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + + +class CustomerGateway(TaggedEC2Object): + + def __init__(self, connection=None): + super(CustomerGateway, self).__init__(connection) + self.id = None + self.type = None + self.state = None + self.ip_address = None + self.bgp_asn = None + + def __repr__(self): + return 'CustomerGateway:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'customerGatewayId': + self.id = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'bgpAsn': + self.bgp_asn = int(value) + else: + setattr(self, name, value) diff --git a/ext/boto/vpc/dhcpoptions.py b/ext/boto/vpc/dhcpoptions.py new file mode 100644 index 0000000000..758d452ceb --- /dev/null +++ b/ext/boto/vpc/dhcpoptions.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
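Before the DhcpOptions parser that follows, it may help to see what the
create_dhcp_options method added above actually puts on the wire: its nested
insert_option helper flattens each option into indexed DhcpConfiguration.N.Key /
DhcpConfiguration.N.Value.M query parameters. A rough sketch of the parameters a
hypothetical call would build:

    # conn.create_dhcp_options(domain_name='example.com',
    #                          domain_name_servers=['10.0.0.2', '10.0.0.3'])
    # produces query parameters roughly like:
    params = {
        'DhcpConfiguration.1.Key': 'domain-name',
        'DhcpConfiguration.1.Value.1': 'example.com',
        'DhcpConfiguration.2.Key': 'domain-name-servers',
        'DhcpConfiguration.2.Value.1': '10.0.0.2',
        'DhcpConfiguration.2.Value.2': '10.0.0.3',
    }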
+ +""" +Represents a DHCP Options set +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class DhcpValueSet(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'value': + self.append(value) + +class DhcpConfigSet(dict): + + def startElement(self, name, attrs, connection): + if name == 'valueSet': + if self._name not in self: + self[self._name] = DhcpValueSet() + return self[self._name] + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + +class DhcpOptions(TaggedEC2Object): + + def __init__(self, connection=None): + super(DhcpOptions, self).__init__(connection) + self.id = None + self.options = None + + def __repr__(self): + return 'DhcpOptions:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(DhcpOptions, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'dhcpConfigurationSet': + self.options = DhcpConfigSet() + return self.options + + def endElement(self, name, value, connection): + if name == 'dhcpOptionsId': + self.id = value + else: + setattr(self, name, value) + diff --git a/ext/boto/vpc/internetgateway.py b/ext/boto/vpc/internetgateway.py new file mode 100644 index 0000000000..09f1fe04e7 --- /dev/null +++ b/ext/boto/vpc/internetgateway.py @@ -0,0 +1,72 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an Internet Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + +class InternetGateway(TaggedEC2Object): + def __init__(self, connection=None): + super(InternetGateway, self).__init__(connection) + self.id = None + self.attachments = [] + + def __repr__(self): + return 'InternetGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(InternetGateway, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'attachmentSet': + self.attachments = ResultSet([('item', InternetGatewayAttachment)]) + return self.attachments + else: + return None + + def endElement(self, name, value, connection): + if name == 'internetGatewayId': + self.id = value + else: + setattr(self, name, value) + +class InternetGatewayAttachment(object): + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def __repr__(self): + return 'InternetGatewayAttachment:%s' % self.vpc_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value diff --git a/ext/boto/vpc/networkacl.py b/ext/boto/vpc/networkacl.py new file mode 100644 index 0000000000..9b8b1cddcf --- /dev/null +++ b/ext/boto/vpc/networkacl.py @@ -0,0 +1,164 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Network ACL +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + + +class Icmp(object): + """ + Defines the ICMP code and type. 
+    """
+    def __init__(self, connection=None):
+        self.code = None
+        self.type = None
+
+    def __repr__(self):
+        return 'Icmp:(code:%s, type:%s)' % (self.code, self.type)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'code':
+            self.code = value
+        elif name == 'type':
+            self.type = value
+
+class NetworkAcl(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        super(NetworkAcl, self).__init__(connection)
+        self.id = None
+        self.vpc_id = None
+        self.network_acl_entries = []
+        self.associations = []
+
+    def __repr__(self):
+        return 'NetworkAcl:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        result = super(NetworkAcl, self).startElement(name, attrs, connection)
+
+        if result is not None:
+            # Parent found an interested element, just return it
+            return result
+
+        if name == 'entrySet':
+            self.network_acl_entries = ResultSet([('item', NetworkAclEntry)])
+            return self.network_acl_entries
+        elif name == 'associationSet':
+            self.associations = ResultSet([('item', NetworkAclAssociation)])
+            return self.associations
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'networkAclId':
+            self.id = value
+        elif name == 'vpcId':
+            self.vpc_id = value
+        else:
+            setattr(self, name, value)
+
+class NetworkAclEntry(object):
+    def __init__(self, connection=None):
+        self.rule_number = None
+        self.protocol = None
+        self.rule_action = None
+        self.egress = None
+        self.cidr_block = None
+        self.port_range = PortRange()
+        self.icmp = Icmp()
+
+    def __repr__(self):
+        return 'Acl:%s' % self.rule_number
+
+    def startElement(self, name, attrs, connection):
+        if name == 'portRange':
+            return self.port_range
+        elif name == 'icmpTypeCode':
+            return self.icmp
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'cidrBlock':
+            self.cidr_block = value
+        elif name == 'egress':
+            self.egress = value
+        elif name == 'protocol':
+            self.protocol = value
+        elif name == 'ruleAction':
+            self.rule_action = value
+        elif name == 'ruleNumber':
+            self.rule_number = value
+
+class NetworkAclAssociation(object):
+    def __init__(self, connection=None):
+        self.id = None
+        self.subnet_id = None
+        self.network_acl_id = None
+
+    def __repr__(self):
+        return 'NetworkAclAssociation:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'networkAclAssociationId':
+            self.id = value
+        elif name == 'networkAclId':
+            self.network_acl_id = value
+        elif name == 'subnetId':
+            self.subnet_id = value
+
+class PortRange(object):
+    """
+    Define the port range for the ACL entry if it is tcp / udp
+    """
+    def __init__(self, connection=None):
+        self.from_port = None
+        self.to_port = None
+
+    def __repr__(self):
+        return 'PortRange:(%s-%s)' % (self.from_port, self.to_port)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'from':
+            self.from_port = value
+        elif name == 'to':
+            self.to_port = value
+
diff --git a/ext/boto/vpc/routetable.py b/ext/boto/vpc/routetable.py
new file mode 100644
index 0000000000..d464e2ed51
--- /dev/null
+++ b/ext/boto/vpc/routetable.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Route Table
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.resultset import ResultSet
+
+class RouteTable(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        super(RouteTable, self).__init__(connection)
+        self.id = None
+        self.vpc_id = None
+        self.routes = []
+        self.associations = []
+
+    def __repr__(self):
+        return 'RouteTable:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        result = super(RouteTable, self).startElement(name, attrs, connection)
+
+        if result is not None:
+            # Parent found an interested element, just return it
+            return result
+
+        if name == 'routeSet':
+            self.routes = ResultSet([('item', Route)])
+            return self.routes
+        elif name == 'associationSet':
+            self.associations = ResultSet([('item', RouteAssociation)])
+            return self.associations
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'routeTableId':
+            self.id = value
+        elif name == 'vpcId':
+            self.vpc_id = value
+        else:
+            setattr(self, name, value)
+
+class Route(object):
+    def __init__(self, connection=None):
+        self.destination_cidr_block = None
+        self.gateway_id = None
+        self.instance_id = None
+        self.interface_id = None
+        self.vpc_peering_connection_id = None
+        self.state = None
+        self.origin = None
+
+    def __repr__(self):
+        return 'Route:%s' % self.destination_cidr_block
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'destinationCidrBlock':
+            self.destination_cidr_block = value
+        elif name == 'gatewayId':
+            self.gateway_id = value
+        elif name == 'instanceId':
+            self.instance_id = value
+        elif name == 'networkInterfaceId':
+            self.interface_id = value
+        elif name == 'vpcPeeringConnectionId':
+            self.vpc_peering_connection_id = value
+        elif name == 'state':
+            self.state = value
+        elif name == 'origin':
+            self.origin = value
+
+class RouteAssociation(object):
+    def __init__(self, connection=None):
+        self.id = None
+        self.route_table_id = None
+        self.subnet_id = None
+        self.main = False
+
+    def __repr__(self):
+        return 'RouteAssociation:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'routeTableAssociationId':
+            self.id = value
+        elif name == 'routeTableId':
+            self.route_table_id = value
+        elif name == 'subnetId':
+            self.subnet_id = value
+        elif name == 'main':
+            self.main = value == 'true'
diff --git a/ext/boto/vpc/subnet.py b/ext/boto/vpc/subnet.py
new file mode 100644
index 0000000000..4d6f902539
--- /dev/null
+++ b/ext/boto/vpc/subnet.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Subnet
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class Subnet(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        super(Subnet, self).__init__(connection)
+        self.id = None
+        self.vpc_id = None
+        self.state = None
+        self.cidr_block = None
+        self.available_ip_address_count = 0
+        self.availability_zone = None
+
+    def __repr__(self):
+        return 'Subnet:%s' % self.id
+
+    def endElement(self, name, value, connection):
+        if name == 'subnetId':
+            self.id = value
+        elif name == 'vpcId':
+            self.vpc_id = value
+        elif name == 'state':
+            self.state = value
+        elif name == 'cidrBlock':
+            self.cidr_block = value
+        elif name == 'availableIpAddressCount':
+            self.available_ip_address_count = int(value)
+        elif name == 'availabilityZone':
+            self.availability_zone = value
+        else:
+            setattr(self, name, value)
+
diff --git a/ext/boto/vpc/vpc.py b/ext/boto/vpc/vpc.py
new file mode 100644
index 0000000000..219a0b590b
--- /dev/null
+++ b/ext/boto/vpc/vpc.py
@@ -0,0 +1,204 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Virtual Private Cloud.
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class VPC(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        """
+        Represents a VPC.
+
+        :ivar id: The unique ID of the VPC.
+ :ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC + (or default if the default options are associated with the VPC). + :ivar state: The current state of the VPC. + :ivar cidr_block: The CIDR block for the VPC. + :ivar is_default: Indicates whether the VPC is the default VPC. + :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC. + :ivar classic_link_enabled: Indicates whether ClassicLink is enabled. + """ + super(VPC, self).__init__(connection) + self.id = None + self.dhcp_options_id = None + self.state = None + self.cidr_block = None + self.is_default = None + self.instance_tenancy = None + self.classic_link_enabled = None + + def __repr__(self): + return 'VPC:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.id = value + elif name == 'dhcpOptionsId': + self.dhcp_options_id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + elif name == 'isDefault': + self.is_default = True if value == 'true' else False + elif name == 'instanceTenancy': + self.instance_tenancy = value + elif name == 'classicLinkEnabled': + self.classic_link_enabled = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_vpc(self.id) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def _get_status_then_update_vpc(self, get_status_method, validate=False, + dry_run=False): + vpc_list = get_status_method( + [self.id], + dry_run=dry_run + ) + if len(vpc_list): + updated_vpc = vpc_list[0] + self._update(updated_vpc) + elif validate: + raise ValueError('%s is not a valid VPC ID' % (self.id,)) + + def update(self, validate=False, dry_run=False): + self._get_status_then_update_vpc( + self.connection.get_all_vpcs, + validate=validate, + dry_run=dry_run + ) + return self.state + + def update_classic_link_enabled(self, validate=False, dry_run=False): + """ + Updates instance's classic_link_enabled attribute + + :rtype: bool + :return: self.classic_link_enabled after update has occurred. + """ + self._get_status_then_update_vpc( + self.connection.get_all_classic_link_vpcs, + validate=validate, + dry_run=dry_run + ) + return self.classic_link_enabled + + def disable_classic_link(self, dry_run=False): + """ + Disables ClassicLink for a VPC. You cannot disable ClassicLink for a + VPC that has EC2-Classic instances linked to it. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self.connection.disable_vpc_classic_link(self.id, + dry_run=dry_run) + + def enable_classic_link(self, dry_run=False): + """ + Enables a VPC for ClassicLink. You can then link EC2-Classic instances + to your ClassicLink-enabled VPC to allow communication over private IP + addresses. You cannot enable your VPC for ClassicLink if any of your + VPC's route tables have existing routes for address ranges within the + 10.0.0.0/8 IP address range, excluding local routes for VPCs in the + 10.0.0.0/16 and 10.1.0.0/16 IP address ranges. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.enable_vpc_classic_link(self.id,
+                                                       dry_run=dry_run)
+
+    def attach_classic_instance(self, instance_id, groups, dry_run=False):
+        """
+        Links an EC2-Classic instance to a ClassicLink-enabled VPC through one
+        or more of the VPC's security groups. You cannot link an EC2-Classic
+        instance to more than one VPC at a time. You can only link an instance
+        that's in the running state. An instance is automatically unlinked from
+        a VPC when it's stopped. You can link it to the VPC again when you
+        restart it.
+
+        After you've linked an instance, you cannot change the VPC security
+        groups that are associated with it. To change the security groups, you
+        must first unlink the instance, and then link it again.
+
+        Linking your instance to a VPC is sometimes referred to as attaching
+        your instance.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2-Classic instance to link to
+            this VPC.
+
+        :type groups: list
+        :param groups: The ID of one or more of the VPC's security groups.
+            You cannot specify security groups from a different VPC. The
+            members of the list can be
+            :class:`boto.ec2.securitygroup.SecurityGroup` objects or
+            strings of the id's of the security groups.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.attach_classic_link_vpc(
+            vpc_id=self.id,
+            instance_id=instance_id,
+            groups=groups,
+            dry_run=dry_run
+        )
+
+    def detach_classic_instance(self, instance_id, dry_run=False):
+        """
+        Unlinks a linked EC2-Classic instance from a VPC. After the instance
+        has been unlinked, the VPC security groups are no longer associated
+        with it. An instance is automatically unlinked from a VPC when
+        it's stopped.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2-Classic instance to unlink
+            from this VPC.
+
+        :type dry_run: bool
+        :param dry_run: Set to True if the operation should not actually run.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        return self.connection.detach_classic_link_vpc(
+            vpc_id=self.id,
+            instance_id=instance_id,
+            dry_run=dry_run
+        )
diff --git a/ext/boto/vpc/vpc_peering_connection.py b/ext/boto/vpc/vpc_peering_connection.py
new file mode 100644
index 0000000000..cdb9af8dae
--- /dev/null
+++ b/ext/boto/vpc/vpc_peering_connection.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2014 Skytap http://skytap.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
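With the ClassicLink helpers on VPC delegating to the connection-level methods,
a linked instance can be managed from the VPC object directly. A short sketch
with placeholder IDs; get_all_vpcs is defined elsewhere in boto.vpc and is not
part of this hunk.

    import boto.vpc

    conn = boto.vpc.connect_to_region('us-east-1')
    vpc = conn.get_all_vpcs(vpc_ids=['vpc-12345678'])[0]  # placeholder ID

    # Link a running EC2-Classic instance through one of the VPC's
    # security groups (both IDs are illustrative).
    vpc.enable_classic_link()
    vpc.attach_classic_instance('i-0abc1234', groups=['sg-11223344'])

    # ...and unlink it again when done.
    vpc.detach_classic_instance('i-0abc1234')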
+
+"""
+Represents a VPC Peering Connection.
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class VpcInfo(object):
+    def __init__(self):
+        """
+        Information on the peer VPC.
+
+        :ivar vpc_id: The unique ID of the peer VPC.
+        :ivar owner_id: The AWS account ID of the owner of the peer VPC.
+        :ivar cidr_block: The CIDR block of the peer VPC.
+        """
+
+        self.vpc_id = None
+        self.owner_id = None
+        self.cidr_block = None
+
+    def __repr__(self):
+        return 'VpcInfo:%s' % self.vpc_id
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'vpcId':
+            self.vpc_id = value
+        elif name == 'ownerId':
+            self.owner_id = value
+        elif name == 'cidrBlock':
+            self.cidr_block = value
+        else:
+            setattr(self, name, value)
+
+class VpcPeeringConnectionStatus(object):
+    """
+    The status of a VPC peering connection.
+
+    :ivar code: The status of the VPC peering connection. Valid values are:
+
+        * pending-acceptance
+        * failed
+        * expired
+        * provisioning
+        * active
+        * deleted
+        * rejected
+
+    :ivar message: A message that provides more information about the status of the VPC peering connection, if applicable.
+    """
+    def __init__(self, code=0, message=None):
+        self.code = code
+        self.message = message
+
+    def __repr__(self):
+        return '%s(%s)' % (self.code, self.message)
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'code':
+            self.code = value
+        elif name == 'message':
+            self.message = value
+        else:
+            setattr(self, name, value)
+
+
+class VpcPeeringConnection(TaggedEC2Object):
+
+    def __init__(self, connection=None):
+        """
+        Represents a VPC peering connection.
+
+        :ivar id: The unique ID of the VPC peering connection.
+        :ivar accepter_vpc_info: Information on the accepter (peer) VPC.
+        :ivar requester_vpc_info: Information on the requester VPC.
+        :ivar expiration_time: The expiration date and time for the VPC peering connection.
+        :ivar status_code: The status of the VPC peering connection.
+        :ivar status_message: A message that provides more information about the status of the VPC peering connection, if applicable.
+        """
+        super(VpcPeeringConnection, self).__init__(connection)
+        self.id = None
+        self.accepter_vpc_info = VpcInfo()
+        self.requester_vpc_info = VpcInfo()
+        self.expiration_time = None
+        self._status = VpcPeeringConnectionStatus()
+
+    @property
+    def status_code(self):
+        return self._status.code
+
+    @property
+    def status_message(self):
+        return self._status.message
+
+    def __repr__(self):
+        return 'VpcPeeringConnection:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        retval = super(VpcPeeringConnection, self).startElement(name, attrs, connection)
+        if retval is not None:
+            return retval
+
+        if name == 'requesterVpcInfo':
+            return self.requester_vpc_info
+        elif name == 'accepterVpcInfo':
+            return self.accepter_vpc_info
+        elif name == 'status':
+            return self._status
+
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'vpcPeeringConnectionId':
+            self.id = value
+        elif name == 'expirationTime':
+            self.expiration_time = value
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        return self.connection.delete_vpc_peering_connection(self.id)
+
+    def _update(self, updated):
+        self.__dict__.update(updated.__dict__)
+
+    def update(self, validate=False, dry_run=False):
+        vpc_peering_connection_list = self.connection.get_all_vpc_peering_connections(
+            [self.id],
+            dry_run=dry_run
+        )
+        if len(vpc_peering_connection_list):
+            updated_vpc_peering_connection = vpc_peering_connection_list[0]
+            self._update(updated_vpc_peering_connection)
+        elif validate:
+            raise ValueError('%s is not a valid VpcPeeringConnection ID' % (self.id,))
+        return self.status_code
diff --git a/ext/boto/vpc/vpnconnection.py b/ext/boto/vpc/vpnconnection.py
new file mode 100644
index 0000000000..cd8b11a62b
--- /dev/null
+++ b/ext/boto/vpc/vpnconnection.py
@@ -0,0 +1,204 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+from datetime import datetime
+from boto.resultset import ResultSet
+
+"""
+Represents a VPN Connection
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
+class VpnConnectionOptions(object):
+    """
+    Represents VPN connection options
+
+    :ivar static_routes_only: Indicates whether the VPN connection uses static
+        routes only. Static routes must be used for devices that don't support
+        BGP.
+
+    """
+    def __init__(self, static_routes_only=None):
+        self.static_routes_only = static_routes_only
+
+    def __repr__(self):
+        return 'VpnConnectionOptions'
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'staticRoutesOnly':
+            self.static_routes_only = True if value == 'true' else False
+        else:
+            setattr(self, name, value)
+
+class VpnStaticRoute(object):
+    """
+    Represents a static route for a VPN connection.
+
+    :ivar destination_cidr_block: The CIDR block associated with the local
+        subnet of the customer data center.
+    :ivar source: Indicates how the routes were provided.
+    :ivar state: The current state of the static route.
+    """
+    def __init__(self, destination_cidr_block=None, source=None, state=None):
+        self.destination_cidr_block = destination_cidr_block
+        self.source = source
+        self.state = state
+
+    def __repr__(self):
+        return 'VpnStaticRoute: %s' % self.destination_cidr_block
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'destinationCidrBlock':
+            self.destination_cidr_block = value
+        elif name == 'source':
+            self.source = value
+        elif name == 'state':
+            self.state = value
+        else:
+            setattr(self, name, value)
+
+class VpnTunnel(object):
+    """
+    Represents telemetry for a VPN tunnel
+
+    :ivar outside_ip_address: The Internet-routable IP address of the
+        virtual private gateway's outside interface.
+    :ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
+    :ivar last_status_change: The date and time of the last change in status.
+    :ivar status_message: If an error occurs, a description of the error.
+    :ivar accepted_route_count: The number of accepted routes.
+    """
+    def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
+                 status_message=None, accepted_route_count=None):
+        self.outside_ip_address = outside_ip_address
+        self.status = status
+        self.last_status_change = last_status_change
+        self.status_message = status_message
+        self.accepted_route_count = accepted_route_count
+
+    def __repr__(self):
+        return 'VpnTunnel: %s' % self.outside_ip_address
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        if name == 'outsideIpAddress':
+            self.outside_ip_address = value
+        elif name == 'status':
+            self.status = value
+        elif name == 'lastStatusChange':
+            self.last_status_change = datetime.strptime(value,
+                                                        '%Y-%m-%dT%H:%M:%S.%fZ')
+        elif name == 'statusMessage':
+            self.status_message = value
+        elif name == 'acceptedRouteCount':
+            try:
+                value = int(value)
+            except ValueError:
+                boto.log.warning('Error converting code (%s) to int' % value)
+            self.accepted_route_count = value
+        else:
+            setattr(self, name, value)
+
+class VpnConnection(TaggedEC2Object):
+    """
+    Represents a VPN Connection
+
+    :ivar id: The ID of the VPN connection.
+    :ivar state: The current state of the VPN connection.
+        Valid values: pending | available | deleting | deleted
+    :ivar customer_gateway_configuration: The configuration information for the
+        VPN connection's customer gateway (in the native XML format). This
+        element is always present in the
+        :class:`boto.vpc.VPCConnection.create_vpn_connection` response;
+        however, it's present in the
+        :class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
+        if the VPN connection is in the pending or available state.
+    :ivar type: The type of VPN connection (ipsec.1).
+ :ivar customer_gateway_id: The ID of the customer gateway at your end of + the VPN connection. + :ivar vpn_gateway_id: The ID of the virtual private gateway + at the AWS side of the VPN connection. + :ivar tunnels: A list of the vpn tunnels (always 2) + :ivar options: The option set describing the VPN connection. + :ivar static_routes: A list of static routes associated with a VPN + connection. + + """ + def __init__(self, connection=None): + super(VpnConnection, self).__init__(connection) + self.id = None + self.state = None + self.customer_gateway_configuration = None + self.type = None + self.customer_gateway_id = None + self.vpn_gateway_id = None + self.tunnels = [] + self.options = None + self.static_routes = [] + + def __repr__(self): + return 'VpnConnection:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpnConnection, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'vgwTelemetry': + self.tunnels = ResultSet([('item', VpnTunnel)]) + return self.tunnels + elif name == 'routes': + self.static_routes = ResultSet([('item', VpnStaticRoute)]) + return self.static_routes + elif name == 'options': + self.options = VpnConnectionOptions() + return self.options + return None + + def endElement(self, name, value, connection): + if name == 'vpnConnectionId': + self.id = value + elif name == 'state': + self.state = value + elif name == 'customerGatewayConfiguration': + self.customer_gateway_configuration = value + elif name == 'type': + self.type = value + elif name == 'customerGatewayId': + self.customer_gateway_id = value + elif name == 'vpnGatewayId': + self.vpn_gateway_id = value + else: + setattr(self, name, value) + + def delete(self, dry_run=False): + return self.connection.delete_vpn_connection( + self.id, + dry_run=dry_run + ) diff --git a/ext/boto/vpc/vpngateway.py b/ext/boto/vpc/vpngateway.py new file mode 100644 index 0000000000..80598109c3 --- /dev/null +++ b/ext/boto/vpc/vpngateway.py @@ -0,0 +1,87 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
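Since VpnConnection parses the vgwTelemetry block into VpnTunnel objects, basic
tunnel health checks fall out naturally. A sketch, assuming an existing
connection and a placeholder ID:

    import boto.vpc

    conn = boto.vpc.connect_to_region('us-east-1')
    vpn = conn.get_all_vpn_connections(['vpn-12345678'])[0]  # placeholder ID

    for tunnel in vpn.tunnels:
        # status is 'UP' or 'DOWN'; accepted_route_count is an int when
        # the API returned a parsable value.
        print('%s: %s (%s routes)' % (tunnel.outside_ip_address,
                                      tunnel.status,
                                      tunnel.accepted_route_count))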
+ +""" +Represents a Vpn Gateway +""" + +from boto.ec2.ec2object import TaggedEC2Object + +class Attachment(object): + + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + +class VpnGateway(TaggedEC2Object): + + def __init__(self, connection=None): + super(VpnGateway, self).__init__(connection) + self.id = None + self.type = None + self.state = None + self.availability_zone = None + self.attachments = [] + + def __repr__(self): + return 'VpnGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + retval = super(VpnGateway, self).startElement(name, attrs, connection) + if retval is not None: + return retval + if name == 'item': + att = Attachment() + self.attachments.append(att) + return att + + def endElement(self, name, value, connection): + if name == 'vpnGatewayId': + self.id = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'attachments': + pass + else: + setattr(self, name, value) + + def attach(self, vpc_id, dry_run=False): + return self.connection.attach_vpn_gateway( + self.id, + vpc_id, + dry_run=dry_run + ) + diff --git a/ext/certifi/__init__.py b/ext/certifi/__init__.py index a76cd477e7..b7875793d7 100644 --- a/ext/certifi/__init__.py +++ b/ext/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import where, old_where -__version__ = "2017.07.27.1" +__version__ = "2017.11.05" diff --git a/ext/certifi/cacert.pem b/ext/certifi/cacert.pem index 44313cb054..445bf1ca6d 100644 --- a/ext/certifi/cacert.pem +++ b/ext/certifi/cacert.pem @@ -356,33 +356,6 @@ LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd 398znM/jra6O1I7mT1GvFpLgXPYHDw== -----END CERTIFICATE----- -# Issuer: CN=Certum CA O=Unizeto Sp. z o.o. -# Subject: CN=Certum CA O=Unizeto Sp. z o.o. 
-# Label: "Certum Root CA" -# Serial: 65568 -# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9 -# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18 -# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24 ------BEGIN CERTIFICATE----- -MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E -jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo -ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI -ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu -Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg -AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 -HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA -uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa -TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg -xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q -CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x -O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs -6GAqm4VKQPNriiTsBhYscw== ------END CERTIFICATE----- - # Issuer: CN=AAA Certificate Services O=Comodo CA Limited # Subject: CN=AAA Certificate Services O=Comodo CA Limited # Label: "Comodo AAA Services root" @@ -769,58 +742,6 @@ VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 1 -# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 -# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f -# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea ------BEGIN CERTIFICATE----- -MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE -FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j -ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js -LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM -BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 -Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy -dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh -cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh -YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg -dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp -bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ -YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT -TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ -9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 -jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW -FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz -ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 -ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L -EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu -L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq -yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC -O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V -um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh -NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= ------END CERTIFICATE----- - # Issuer: O=Government Root Certification Authority # Subject: O=Government Root Certification Authority # Label: "Taiwan GRCA" @@ -1562,44 +1483,6 @@ W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D hNQ+IIX3Sj0rnP0qCglN6oH4EZw= -----END CERTIFICATE----- -# Issuer: CN=T\xdcB\u0130TAK UEKAE K\xf6k Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 - S\xfcr\xfcm 3 
O=T\xfcrkiye Bilimsel ve Teknolojik Ara\u015ft\u0131rma Kurumu - T\xdcB\u0130TAK OU=Ulusal Elektronik ve Kriptoloji Ara\u015ft\u0131rma Enstit\xfcs\xfc - UEKAE/Kamu Sertifikasyon Merkezi -# Subject: CN=T\xdcB\u0130TAK UEKAE K\xf6k Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 - S\xfcr\xfcm 3 O=T\xfcrkiye Bilimsel ve Teknolojik Ara\u015ft\u0131rma Kurumu - T\xdcB\u0130TAK OU=Ulusal Elektronik ve Kriptoloji Ara\u015ft\u0131rma Enstit\xfcs\xfc - UEKAE/Kamu Sertifikasyon Merkezi -# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3" -# Serial: 17 -# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26 -# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96 -# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a ------BEGIN CERTIFICATE----- -MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS -MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp -bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw -VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy -YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy -dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 -ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe -Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx -GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls -aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU -QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh -xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 -aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr -IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h -gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK -O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO -fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw -lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL -hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID -AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP -NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t -wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM -7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh -gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n -oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs -yZyQ2uypQjyttgI= ------END CERTIFICATE----- - # Issuer: O=certSIGN OU=certSIGN ROOT CA # Subject: O=certSIGN OU=certSIGN ROOT CA # Label: "certSIGN ROOT CA" @@ -1937,47 +1820,6 @@ pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN QSdJQO7e5iNEOdyhIta6A/I= -----END CERTIFICATE----- -# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI -# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI -# Label: "ACEDICOM Root" -# Serial: 7029493972724711941 -# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6 -# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84 -# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a ------BEGIN CERTIFICATE----- -MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE 
-AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x -CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW -MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF -RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC -AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7 -09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7 -XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P -Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK -t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb -X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28 -MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU -fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI -2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH -K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae -ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP -BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ -MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw -RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv -bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm -fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3 -gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe -I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i -5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi -ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn -MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ -o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6 -zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN -GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt -r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK -Z05phkOTOPu220+DkdRgfks+KzgHVZhepA== ------END CERTIFICATE----- - # Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. # Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
# Label: "Microsec e-Szigno Root CA 2009" @@ -2463,46 +2305,6 @@ VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI 03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= -----END CERTIFICATE----- -# Issuer: CN=Certinomis - Autorit\xe9 Racine O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Autorit\xe9 Racine O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Autorit\xe9 Racine" -# Serial: 1 -# MD5 Fingerprint: 7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a -# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3 -# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17 ------BEGIN CERTIFICATE----- -MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk -BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 -Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl -cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 -aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY -F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N -8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe -rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K -/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu -7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC -28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 -lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E -nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB -0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 -5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj -WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN -jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ -KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s -ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM -OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q -619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn -2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj -o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v -nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG -5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq -pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb -dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 -BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -2664,96 +2466,6 @@ jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN ZetX2fNXlrtIzYE= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 45 -# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 -# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 -# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 ------BEGIN CERTIFICATE----- -MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD -VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul -F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC -ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w -ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk -aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 -YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg -c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 -d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG -CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 -dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF -wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS -Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst -0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc -pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl -CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF -P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK -1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm -KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE -JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ -8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm -fyWl8kgAwKQB2j8= ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. -# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. 
-# Label: "StartCom Certification Authority G2" -# Serial: 59 -# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 -# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 -# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 ------BEGIN CERTIFICATE----- -MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 -OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG -A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ -JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD -vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo -D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ -Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW -RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK -HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN -nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM -0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i -UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 -Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg -TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE -AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL -BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K -2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX -UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl -6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK -9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ -HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI -wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY -XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l -IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo -hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr -so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI ------END CERTIFICATE----- - # Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Label: "Buypass Class 2 Root CA" @@ -2895,39 +2607,6 @@ iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= -----END CERTIFICATE----- -# Issuer: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. (c) Aral\u0131k 2007 -# Subject: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. 
(c) Aral\u0131k 2007 -# Label: "TURKTRUST Certificate Services Provider Root 2007" -# Serial: 1 -# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72 -# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33 -# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50 ------BEGIN CERTIFICATE----- -MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx -OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry -b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC -VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE -sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F -ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY -KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG -+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG -HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P -IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M -733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk -Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G -CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW -AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I -aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 -mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa -XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ -qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 ------END CERTIFICATE----- - # Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Label: "D-TRUST Root Class 3 CA 2 2009" @@ -2994,67 +2673,6 @@ xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 -----END CERTIFICATE----- -# Issuer: CN=Autoridad de Certificacion Raiz del Estado Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica -# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT -# Label: "PSCProcert" -# Serial: 11 -# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec -# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74 -# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0 ------BEGIN CERTIFICATE----- -MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1 -dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s -YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz -dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 -aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh -IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ -KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw -MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy -b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx -KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG 
-A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u -aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI -hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9 -7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74 -BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G -ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9 -JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0 -PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2 -0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH -0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/ -6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m -v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7 -K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev -bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw -MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w -MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD -gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0 -b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh -bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0 -cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp -ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg -ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq -hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD -AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w -MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag -RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t -UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl -cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v -Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG -AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN -AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS -1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB -3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv -Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh -HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm -pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz -sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE -qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb -mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9 -opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H -YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km ------END CERTIFICATE----- - # Issuer: CN=CA Disig Root R1 O=Disig a.s. # Subject: CN=CA Disig Root R1 O=Disig a.s. 
# Label: "CA Disig Root R1" @@ -3629,85 +3247,6 @@ r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited -# Label: "WoSign" -# Serial: 125491772294754854453622855443212256657 -# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d -# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb -# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08 ------BEGIN CERTIFICATE----- -MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV -BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw -MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX -b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN -rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U -fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc -f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2 -ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M -x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR -aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch -zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar -uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K -mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA -Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv -HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H -EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 -LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ -MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e -JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN -g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp -dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab -R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ -PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce -xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+ -J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl -OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT -ee5Ehr7XHuQe+w== ------END CERTIFICATE----- - -# Issuer: CN=CA \u6c83\u901a\u6839\u8bc1\u4e66 O=WoSign CA Limited -# Subject: CN=CA \u6c83\u901a\u6839\u8bc1\u4e66 O=WoSign CA Limited -# Label: "WoSign China" -# Serial: 106921963437422998931660691310149453965 -# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93 -# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6 -# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54 ------BEGIN CERTIFICATE----- -MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV -BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw -MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl -ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF 
-AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r -D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1 -9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf -v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk -UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L -NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb -+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V -qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K -yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G -AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK -J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC -AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O -BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4 -WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 -yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj -/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6 -jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2 -ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX -X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n -FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D -u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l -O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le -ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1 -2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ== ------END CERTIFICATE----- - # Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Label: "COMODO RSA Certification Authority" @@ -4219,56 +3758,6 @@ aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Label: "Certification Authority of WoSign G2" -# Serial: 142423943073812161787490648904721057092 -# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 -# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 -# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV -BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx -MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK -ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo -b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX -JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO -gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg -5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n -fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 -2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ -KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 -fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G -3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy 
-SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng -LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 -XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= ------END CERTIFICATE----- - -# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited -# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited -# Label: "CA WoSign ECC Root" -# Serial: 138625735294506723296996289575837012112 -# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 -# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b -# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 ------BEGIN CERTIFICATE----- -MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw -CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT -EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 -NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb -MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID -YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 -KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES -1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB -1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 -aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K ------END CERTIFICATE----- - # Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Label: "SZAFIR ROOT CA2" @@ -4832,3 +4321,285 @@ lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c 8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= -----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. 
+# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- diff --git a/ext/certifi/core.py b/ext/certifi/core.py index f41d3fd1b7..dd4cdea76c 100644 --- a/ext/certifi/core.py +++ b/ext/certifi/core.py @@ -26,7 +26,8 @@ def where(): def old_where(): warnings.warn( - "The weak security bundle is being deprecated.", + "The weak security bundle is being deprecated. 
It will be removed in " + "2018.", DeprecatedBundleWarning ) f = os.path.dirname(__file__) diff --git a/ext/certifi/weak.pem b/ext/certifi/weak.pem index 380c5233af..4426034f46 100644 --- a/ext/certifi/weak.pem +++ b/ext/certifi/weak.pem @@ -356,33 +356,6 @@ LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd 398znM/jra6O1I7mT1GvFpLgXPYHDw== -----END CERTIFICATE----- -# Issuer: CN=Certum CA O=Unizeto Sp. z o.o. -# Subject: CN=Certum CA O=Unizeto Sp. z o.o. -# Label: "Certum Root CA" -# Serial: 65568 -# MD5 Fingerprint: 2c:8f:9f:66:1d:18:90:b1:47:26:9d:8e:86:82:8c:a9 -# SHA1 Fingerprint: 62:52:dc:40:f7:11:43:a2:2f:de:9e:f7:34:8e:06:42:51:b1:81:18 -# SHA256 Fingerprint: d8:e0:fe:bc:1d:b2:e3:8d:00:94:0f:37:d2:7d:41:34:4d:99:3e:73:4b:99:d5:65:6d:97:78:d4:d8:14:36:24 ------BEGIN CERTIFICATE----- -MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM -MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD -QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E -jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo -ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI -ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu -Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg -AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 -HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA -uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa -TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg -xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q -CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x -O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs -6GAqm4VKQPNriiTsBhYscw== ------END CERTIFICATE----- - # Issuer: CN=AAA Certificate Services O=Comodo CA Limited # Subject: CN=AAA Certificate Services O=Comodo CA Limited # Label: "Comodo AAA Services root" @@ -769,58 +742,6 @@ VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 1 -# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16 -# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f -# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea ------BEGIN CERTIFICATE----- -MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE -FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j -ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js -LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM -BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 -Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy -dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh -cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh -YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg -dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp -bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ -YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT -TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ -9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 -jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW -FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz -ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 -ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L -EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu -L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq -yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC -O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V -um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh -NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= ------END CERTIFICATE----- - # Issuer: O=Government Root Certification Authority # Subject: O=Government Root Certification Authority # Label: "Taiwan GRCA" @@ -1562,44 +1483,6 @@ W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D hNQ+IIX3Sj0rnP0qCglN6oH4EZw= -----END CERTIFICATE----- -# Issuer: CN=T\xdcB\u0130TAK UEKAE K\xf6k Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 - S\xfcr\xfcm 3 
O=T\xfcrkiye Bilimsel ve Teknolojik Ara\u015ft\u0131rma Kurumu - T\xdcB\u0130TAK OU=Ulusal Elektronik ve Kriptoloji Ara\u015ft\u0131rma Enstit\xfcs\xfc - UEKAE/Kamu Sertifikasyon Merkezi -# Subject: CN=T\xdcB\u0130TAK UEKAE K\xf6k Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 - S\xfcr\xfcm 3 O=T\xfcrkiye Bilimsel ve Teknolojik Ara\u015ft\u0131rma Kurumu - T\xdcB\u0130TAK OU=Ulusal Elektronik ve Kriptoloji Ara\u015ft\u0131rma Enstit\xfcs\xfc - UEKAE/Kamu Sertifikasyon Merkezi -# Label: "T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3" -# Serial: 17 -# MD5 Fingerprint: ed:41:f5:8c:50:c5:2b:9c:73:e6:ee:6c:eb:c2:a8:26 -# SHA1 Fingerprint: 1b:4b:39:61:26:27:6b:64:91:a2:68:6d:d7:02:43:21:2d:1f:1d:96 -# SHA256 Fingerprint: e4:c7:34:30:d7:a5:b5:09:25:df:43:37:0a:0d:21:6e:9a:79:b9:d6:db:83:73:a0:c6:9e:b1:cc:31:c7:c5:2a ------BEGIN CERTIFICATE----- -MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS -MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp -bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw -VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy -YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy -dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 -ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe -Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx -GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls -aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU -QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh -xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 -aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr -IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h -gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK -O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO -fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw -lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL -hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID -AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP -NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t -wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM -7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh -gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n -oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs -yZyQ2uypQjyttgI= ------END CERTIFICATE----- - # Issuer: O=certSIGN OU=certSIGN ROOT CA # Subject: O=certSIGN OU=certSIGN ROOT CA # Label: "certSIGN ROOT CA" @@ -1937,47 +1820,6 @@ pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN QSdJQO7e5iNEOdyhIta6A/I= -----END CERTIFICATE----- -# Issuer: CN=ACEDICOM Root O=EDICOM OU=PKI -# Subject: CN=ACEDICOM Root O=EDICOM OU=PKI -# Label: "ACEDICOM Root" -# Serial: 7029493972724711941 -# MD5 Fingerprint: 42:81:a0:e2:1c:e3:55:10:de:55:89:42:65:96:22:e6 -# SHA1 Fingerprint: e0:b4:32:2e:b2:f6:a5:68:b6:54:53:84:48:18:4a:50:36:87:43:84 -# SHA256 Fingerprint: 03:95:0f:b4:9a:53:1f:3e:19:91:94:23:98:df:a9:e0:ea:32:d7:ba:1c:dd:9b:c8:5d:b5:7e:d9:40:0b:43:4a ------BEGIN CERTIFICATE----- -MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UE 
-AwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00x -CzAJBgNVBAYTAkVTMB4XDTA4MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEW -MBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZF -RElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC -AgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHkWLn7 -09gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7 -XBZXehuDYAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5P -Grjm6gSSrj0RuVFCPYewMYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAK -t0SdE3QrwqXrIhWYENiLxQSfHY9g5QYbm8+5eaA9oiM/Qj9r+hwDezCNzmzAv+Yb -X79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbkHQl/Sog4P75n/TSW9R28 -MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTTxKJxqvQU -fecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI -2Sf23EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyH -K9caUPgn6C9D4zq92Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEae -ZAwUswdbxcJzbPEHXEUkFDWug/FqTYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAP -BgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz4SsrSbbXc6GqlPUB53NlTKxQ -MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU9QHnc2VMrFAw -RAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv -bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWIm -fQwng4/F9tqgaHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3 -gvoFNTPhNahXwOf9jU8/kzJPeGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKe -I6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1PwkzQSulgUV1qzOMPPKC8W64iLgpq0i -5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1ThCojz2GuHURwCRi -ipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oIKiMn -MCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZ -o5NjEFIqnxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6 -zqylfDJKZ0DcMDQj3dcEI2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacN -GHk0vFQYXlPKNFHtRQrmjseCNj6nOGOpMCwXEGCSn1WHElkQwg9naRHMTh5+Spqt -r0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3otkYNbn5XOmeUwssfnHdK -Z05phkOTOPu220+DkdRgfks+KzgHVZhepA== ------END CERTIFICATE----- - # Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. # Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd. 
# Label: "Microsec e-Szigno Root CA 2009" @@ -2463,46 +2305,6 @@ VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI 03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= -----END CERTIFICATE----- -# Issuer: CN=Certinomis - Autorit\xe9 Racine O=Certinomis OU=0002 433998903 -# Subject: CN=Certinomis - Autorit\xe9 Racine O=Certinomis OU=0002 433998903 -# Label: "Certinomis - Autorit\xe9 Racine" -# Serial: 1 -# MD5 Fingerprint: 7f:30:78:8c:03:e3:ca:c9:0a:e2:c9:ea:1e:aa:55:1a -# SHA1 Fingerprint: 2e:14:da:ec:28:f0:fa:1e:8e:38:9a:4e:ab:eb:26:c0:0a:d3:83:c3 -# SHA256 Fingerprint: fc:bf:e2:88:62:06:f7:2b:27:59:3c:8b:07:02:97:e1:2d:76:9e:d1:0e:d7:93:07:05:a8:09:8e:ff:c1:4d:17 ------BEGIN CERTIFICATE----- -MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET -MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk -BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 -Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl -cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 -aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY -F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N -8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe -rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K -/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu -7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC -28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 -lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E -nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB -0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 -5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj -WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN -jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ -KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s -ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM -OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q -619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn -2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj -o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v -nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG -5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq -pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb -dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 -BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -2664,96 +2466,6 @@ jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN ZetX2fNXlrtIzYE= -----END CERTIFICATE----- -# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing -# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing -# Label: "StartCom Certification Authority" -# Serial: 45 -# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16 -# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0 -# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11 ------BEGIN CERTIFICATE----- -MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg -Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 -MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi -U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh -cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk -pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf -OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C -Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT -Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi -HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM -Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w -+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ -Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 -Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B -26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID -AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD -VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul -F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC -ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w -ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk -aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 -YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg -c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 -aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 -d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG -CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 -dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF -wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS -Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst -0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc -pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl -CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF -P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK -1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm -KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE -JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ -8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm -fyWl8kgAwKQB2j8= ------END CERTIFICATE----- - -# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd. -# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd. 
-# Label: "StartCom Certification Authority G2" -# Serial: 59 -# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64 -# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17 -# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95 ------BEGIN CERTIFICATE----- -MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW -MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm -aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 -OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG -A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ -JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD -vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo -D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ -Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW -RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK -HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN -nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM -0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i -UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 -Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg -TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE -AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL -BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K -2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX -UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl -6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK -9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ -HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI -wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY -XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l -IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo -hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr -so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI ------END CERTIFICATE----- - # Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327 # Label: "Buypass Class 2 Root CA" @@ -2895,39 +2607,6 @@ iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= -----END CERTIFICATE----- -# Issuer: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. (c) Aral\u0131k 2007 -# Subject: CN=T\xdcRKTRUST Elektronik Sertifika Hizmet Sa\u011flay\u0131c\u0131s\u0131 O=T\xdcRKTRUST Bilgi \u0130leti\u015fim ve Bili\u015fim G\xfcvenli\u011fi Hizmetleri A.\u015e. 
(c) Aral\u0131k 2007 -# Label: "TURKTRUST Certificate Services Provider Root 2007" -# Serial: 1 -# MD5 Fingerprint: 2b:70:20:56:86:82:a0:18:c8:07:53:12:28:70:21:72 -# SHA1 Fingerprint: f1:7f:6f:b6:31:dc:99:e3:a3:c8:7f:fe:1c:f1:81:10:88:d9:60:33 -# SHA256 Fingerprint: 97:8c:d9:66:f2:fa:a0:7b:a7:aa:95:00:d9:c0:2e:9d:77:f2:cd:ad:a6:ad:6b:a7:4a:f4:b9:1c:66:59:3c:50 ------BEGIN CERTIFICATE----- -MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx -OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry -b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC -VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE -sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F -ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY -KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG -+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG -HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P -IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M -733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk -Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G -CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW -AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I -aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 -mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa -XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ -qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 ------END CERTIFICATE----- - # Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH # Label: "D-TRUST Root Class 3 CA 2 2009" @@ -2994,67 +2673,6 @@ xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 -----END CERTIFICATE----- -# Issuer: CN=Autoridad de Certificacion Raiz del Estado Venezolano O=Sistema Nacional de Certificacion Electronica OU=Superintendencia de Servicios de Certificacion Electronica -# Subject: CN=PSCProcert O=Sistema Nacional de Certificacion Electronica OU=Proveedor de Certificados PROCERT -# Label: "PSCProcert" -# Serial: 11 -# MD5 Fingerprint: e6:24:e9:12:01:ae:0c:de:8e:85:c4:ce:a3:12:dd:ec -# SHA1 Fingerprint: 70:c1:8d:74:b4:28:81:0a:e4:fd:a5:75:d7:01:9f:99:b0:3d:50:74 -# SHA256 Fingerprint: 3c:fc:3c:14:d1:f6:84:ff:17:e3:8c:43:ca:44:0c:00:b9:67:ec:93:3e:8b:fe:06:4c:a1:d7:2c:90:f2:ad:b0 ------BEGIN CERTIFICATE----- -MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1 -dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s -YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz -dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 -aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh -IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ -KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEw -MFoXDTIwMTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHBy -b2NlcnQubmV0LnZlMQ8wDQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGEx -KjAoBgNVBAsTIVByb3ZlZWRvciBkZSBDZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQG 
-A1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9u -aWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIwDQYJKoZI -hvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo9 -7BVCwfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74 -BCXfgI8Qhd19L3uA3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38G -ieU89RLAu9MLmV+QfI4tL3czkkohRqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9 -JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmOEO8GqQKJ/+MMbpfg353bIdD0 -PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG20qCZyFSTXai2 -0b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH -0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/ -6mnbVSKVUyqUtd+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1m -v6JpIzi4mWCZDlZTOpx+FIywBm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7 -K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvpr2uKGcfLFFb14dq12fy/czja+eev -bqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/AgEBMDcGA1UdEgQw -MC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0w -MB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFD -gBStuyIdxuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0 -b3JpZGFkIGRlIENlcnRpZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xh -bm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0 -cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRp -ZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEg -ZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkq -hkiG9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQD -AgEGME0GA1UdEQRGMESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0w -MDAwMDKgGwYFYIZeAgKgEgwQUklGLUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEag -RKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9sY3IvQ0VSVElGSUNBRE8t -UkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNyYWl6LnN1c2Nl -cnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v -Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsG -AQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcN -AQELBQADggIBACtZ6yKZu4SqT96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS -1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmNg7+mvTV+LFwxNG9s2/NkAZiqlCxB -3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4quxtxj7mkoP3Yldmv -Wb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1n8Gh -HVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHm -pHmJWhSnFFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXz -sOfIt+FTvZLm8wyWuevo5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bE -qCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq3TNWOByyrYDT13K9mmyZY+gAu0F2Bbdb -mRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5poLWccret9W6aAjtmcz9 -opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3YeMLEYC/H -YvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km ------END CERTIFICATE----- - # Issuer: CN=CA Disig Root R1 O=Disig a.s. # Subject: CN=CA Disig Root R1 O=Disig a.s. 
# Label: "CA Disig Root R1" @@ -3629,85 +3247,6 @@ r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign O=WoSign CA Limited -# Label: "WoSign" -# Serial: 125491772294754854453622855443212256657 -# MD5 Fingerprint: a1:f2:f9:b5:d2:c8:7a:74:b8:f3:05:f1:d7:e1:84:8d -# SHA1 Fingerprint: b9:42:94:bf:91:ea:8f:b6:4b:e6:10:97:c7:fb:00:13:59:b6:76:cb -# SHA256 Fingerprint: 4b:22:d5:a6:ae:c9:9f:3c:db:79:aa:5e:c0:68:38:47:9c:d5:ec:ba:71:64:f7:f2:2d:c1:d6:5f:63:d8:57:08 ------BEGIN CERTIFICATE----- -MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBV -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNV -BAMTIUNlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgw -MTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFX -b1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvcqN -rLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1U -fcIiePyOCbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcScc -f+Hb0v1naMQFXQoOXXDX2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2 -ZjC1vt7tj/id07sBMOby8w7gLJKA84X5KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4M -x1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR+ScPewavVIMYe+HdVHpR -aG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ezEC8wQjch -zDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDar -uHqklWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221K -mYo0SLwX3OSACCK28jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvA -Sh0JWzko/amrzgD5LkhLJuYwTKVYyrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWv -HYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0CAwEAAaNCMEAwDgYDVR0PAQH/ -BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R8bNLtwYgFP6H -EtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1 -LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJ -MuYhOZO9sxXqT2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2e -JXLOC62qx1ViC777Y7NhRCOjy+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VN -g64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC2nz4SNAzqfkHx5Xh9T71XXG68pWp -dIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes5cVAWubXbHssw1ab -R80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/EaEQ -PkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGce -xGATVdVhmVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+ -J7x6v+Db9NpSvd4MVHAxkUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMl -OtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGikpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWT -ee5Ehr7XHuQe+w== ------END CERTIFICATE----- - -# Issuer: CN=CA \u6c83\u901a\u6839\u8bc1\u4e66 O=WoSign CA Limited -# Subject: CN=CA \u6c83\u901a\u6839\u8bc1\u4e66 O=WoSign CA Limited -# Label: "WoSign China" -# Serial: 106921963437422998931660691310149453965 -# MD5 Fingerprint: 78:83:5b:52:16:76:c4:24:3b:83:78:e8:ac:da:9a:93 -# SHA1 Fingerprint: 16:32:47:8d:89:f9:21:3a:92:00:85:63:f5:a4:a7:d3:12:40:8a:d6 -# SHA256 Fingerprint: d6:f0:34:bd:94:aa:23:3f:02:97:ec:a4:24:5b:28:39:73:e4:47:aa:59:0f:31:0c:77:f4:8f:df:83:11:22:54 ------BEGIN CERTIFICATE----- -MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBG -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNV -BAMMEkNBIOayg+mAmuagueivgeS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgw -MTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRl -ZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjANBgkqhkiG9w0BAQEF 
-AAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k8H/r -D195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld1 -9AXbbQs5uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExf -v5RxadmWPgxDT74wwJ85dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnk -UkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+L -NVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFyb7Ao65vh4YOhn0pdr8yb -+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc76DbT52V -qyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6K -yX2m+Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0G -AbQOXDBGVWCvOGU6yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaK -J/kR8slC/k7e3x9cxKSGhxYzoacXGKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwEC -AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O -BBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUAA4ICAQBqinA4 -WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6 -yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj -/feTZU7n85iYr83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6 -jBAyvd0zaziGfjk9DgNyp115j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2 -ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0AkLppRQjbbpCBhqcqBT/mhDn4t/lX -X0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97qA4bLJyuQHCH2u2n -FoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Yjj4D -u9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10l -O1Hm13ZBONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Le -ie2uPAmvylezkolwQOQvT8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR1 -2KvxAmLBsX5VYc8T1yaw15zLKYs4SgsOkI26oQ== ------END CERTIFICATE----- - # Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited # Label: "COMODO RSA Certification Authority" @@ -4219,56 +3758,6 @@ aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= -----END CERTIFICATE----- -# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited -# Label: "Certification Authority of WoSign G2" -# Serial: 142423943073812161787490648904721057092 -# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 -# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 -# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY -MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV -BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx -MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK -ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo -b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX -JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO -gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg -5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n -fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 -2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ -KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 -fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G -3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy 
-SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng -LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 -XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= ------END CERTIFICATE----- - -# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited -# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited -# Label: "CA WoSign ECC Root" -# Serial: 138625735294506723296996289575837012112 -# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 -# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b -# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 ------BEGIN CERTIFICATE----- -MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw -CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT -EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 -NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb -MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID -YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 -KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES -1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB -1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 -aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K ------END CERTIFICATE----- - # Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A. # Label: "SZAFIR ROOT CA2" @@ -4832,6 +4321,288 @@ lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c 8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= -----END CERTIFICATE----- + +# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. +# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD. 
+# Label: "GDCA TrustAUTH R5 ROOT" +# Serial: 9009899650740120186 +# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4 +# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4 +# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93 +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE +BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0 +MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV +BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w +HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj +Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj +TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u +KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj +qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm +MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12 +ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP +zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk +L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC +jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA +HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC +AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm +DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5 +COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry +L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf +JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg +IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io +2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV +09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ +XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq +T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe +MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-1" +# Serial: 15752444095811006489 +# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45 +# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a +# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y +IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB +pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h +IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG +A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU +cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid +RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V +seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme +9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV +EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW +hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/ +DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD +ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I +/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf +ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ +yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts +L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN +zl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor RootCert CA-2" +# Serial: 2711694510199101698 +# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64 +# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0 +# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65 +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig +Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk +MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg +Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD +VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy +dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+ +QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq +1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp +2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK +DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape +az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF +3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88 +oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM +g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3 +mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd +BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U +nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw +DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX +dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+ +MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL +/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX +CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa +ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW +2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7 +N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3 +Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB +As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp +5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu +1uwJ +-----END CERTIFICATE----- + +# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority +# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. 
OU=TrustCor Certificate Authority +# Label: "TrustCor ECA-1" +# Serial: 9548242946988625984 +# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c +# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd +# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD +VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk +MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y +IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV +BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw +IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy +dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig +RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb +3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA +BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5 +3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou +owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/ +wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF +ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf +BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/ +MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv +civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2 +AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50 +soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI +WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi +tJ/X5g== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation +# Label: "SSL.com Root Certification Authority RSA" +# Serial: 8875640296558310041 +# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29 +# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb +# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm 
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com Root Certification Authority ECC" +# Serial: 8495723813297216424 +# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e +# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a +# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz +WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority RSA R2" +# Serial: 6248227494352943350 +# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95 +# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a +# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq 
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation +# Label: "SSL.com EV Root Certification Authority ECC" +# Serial: 3182246526754555285 +# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90 +# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d +# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8 +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx +NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv +bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA +VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku +WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP +MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX +5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ +ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg +h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- # Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Secure Server CA" diff --git a/ext/diskcache/__init__.py b/ext/diskcache/__init__.py new file mode 100644 index 0000000000..622cd6ff52 --- /dev/null +++ b/ext/diskcache/__init__.py @@ -0,0 +1,34 @@ +"DiskCache: disk and file backed cache." + +from .core import Cache, Disk, UnknownFileWarning, EmptyDirWarning, Timeout +from .core import DEFAULT_SETTINGS, EVICTION_POLICY +from .fanout import FanoutCache +from .persistent import Deque, Index + +__all__ = [ + 'Cache', + 'Disk', + 'UnknownFileWarning', + 'EmptyDirWarning', + 'Timeout', + 'DEFAULT_SETTINGS', + 'EVICTION_POLICY', + 'FanoutCache', + 'Deque', + 'Index', +] + +try: + from .djangocache import DjangoCache # pylint: disable=wrong-import-position + __all__.append('DjangoCache') +except Exception: # pylint: disable=broad-except + # Django not installed or not setup so ignore. + pass + + +__title__ = 'diskcache' +__version__ = '2.9.0' +__build__ = 0x020900 +__author__ = 'Grant Jenks' +__license__ = 'Apache 2.0' +__copyright__ = 'Copyright 2016 Grant Jenks' diff --git a/ext/diskcache/cli.py b/ext/diskcache/cli.py new file mode 100644 index 0000000000..44bffebfcc --- /dev/null +++ b/ext/diskcache/cli.py @@ -0,0 +1 @@ +"Command line interface to disk cache." diff --git a/ext/diskcache/core.py b/ext/diskcache/core.py new file mode 100644 index 0000000000..e17435bc52 --- /dev/null +++ b/ext/diskcache/core.py @@ -0,0 +1,1804 @@ +"""Core disk and file backed cache API. + +""" + +import codecs +import contextlib as cl +import errno +import functools as ft +import io +import os +import os.path as op +import sqlite3 +import struct +import sys +import threading +import time +import warnings +import zlib + +if sys.hexversion < 0x03000000: + import cPickle as pickle + # ISSUE #25 Fix for http://bugs.python.org/issue10211 + from cStringIO import StringIO as BytesIO + TextType = unicode + BytesType = str + INT_TYPES = int, long + range = xrange # pylint: disable=redefined-builtin,invalid-name + io_open = io.open # pylint: disable=invalid-name +else: + import pickle + from io import BytesIO # pylint: disable=ungrouped-imports + TextType = str + BytesType = bytes + INT_TYPES = int, + io_open = open # pylint: disable=invalid-name + +try: + WindowsError +except NameError: + class WindowsError(Exception): + "Windows error place-holder on platforms without support." + pass + +class Constant(tuple): + "Pretty display of immutable constant." 
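+    # A Constant is an immutable 1-tuple whose repr() is simply its name.
+    # ENOVAL below is one such sentinel: unlike None, it can never collide
+    # with a value a caller legitimately stored in the cache.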
+ def __new__(cls, name): + return tuple.__new__(cls, (name,)) + + def __repr__(self): + return self[0] + +DBNAME = 'cache.db' +ENOVAL = Constant('ENOVAL') + +MODE_NONE = 0 +MODE_RAW = 1 +MODE_BINARY = 2 +MODE_TEXT = 3 +MODE_PICKLE = 4 + +DEFAULT_SETTINGS = { + u'statistics': 0, # False + u'tag_index': 0, # False + u'eviction_policy': u'least-recently-stored', + u'size_limit': 2 ** 30, # 1gb + u'cull_limit': 10, + u'sqlite_cache_size': 2 ** 13, # 8,192 pages + u'sqlite_journal_mode': u'WAL', + u'sqlite_mmap_size': 2 ** 26, # 64mb + u'sqlite_synchronous': u'NORMAL', + u'disk_min_file_size': 2 ** 15, # 32kb + u'disk_pickle_protocol': pickle.HIGHEST_PROTOCOL, +} + +METADATA = { + u'count': 0, + u'size': 0, + u'hits': 0, + u'misses': 0, +} + +EVICTION_POLICY = { + 'none': { + 'init': None, + 'get': None, + 'cull': None, + }, + 'least-recently-stored': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_store_time ON' + ' Cache (store_time)' + ), + 'get': None, + 'cull': 'SELECT %s FROM Cache ORDER BY store_time LIMIT ?', + }, + 'least-recently-used': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_access_time ON' + ' Cache (access_time)' + ), + 'get': 'access_time = ((julianday("now") - 2440587.5) * 86400.0)', + 'cull': 'SELECT %s FROM Cache ORDER BY access_time LIMIT ?', + }, + 'least-frequently-used': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_access_count ON' + ' Cache (access_count)' + ), + 'get': 'access_count = access_count + 1', + 'cull': 'SELECT %s FROM Cache ORDER BY access_count LIMIT ?', + }, +} + + +class Disk(object): + "Cache key and value serialization for SQLite database and files." + def __init__(self, directory, min_file_size=0, pickle_protocol=0): + """Initialize disk instance. + + :param str directory: directory path + :param int min_file_size: minimum size for file use + :param int pickle_protocol: pickle protocol for serialization + + """ + self._directory = directory + self.min_file_size = min_file_size + self.pickle_protocol = pickle_protocol + + + def hash(self, key): + """Compute portable hash for `key`. + + :param key: key to hash + :return: hash value + + """ + mask = 0xFFFFFFFF + disk_key, _ = self.put(key) + type_disk_key = type(disk_key) + + if type_disk_key is sqlite3.Binary: + return zlib.adler32(disk_key) & mask + elif type_disk_key is TextType: + return zlib.adler32(disk_key.encode('utf-8')) & mask # pylint: disable=no-member + elif type_disk_key in INT_TYPES: + return disk_key % mask + else: + assert type_disk_key is float + return zlib.adler32(struct.pack('!d', disk_key)) & mask + + + def put(self, key): + """Convert `key` to fields key and raw for Cache table. + + :param key: key to convert + :return: (database key, raw boolean) pair + + """ + # pylint: disable=bad-continuation,unidiomatic-typecheck + type_key = type(key) + + if type_key is BytesType: + return sqlite3.Binary(key), True + elif ((type_key is TextType) + or (type_key in INT_TYPES + and -9223372036854775808 <= key <= 9223372036854775807) + or (type_key is float)): + return key, True + else: + result = pickle.dumps(key, protocol=self.pickle_protocol) + return sqlite3.Binary(result), False + + + def get(self, key, raw): + """Convert fields `key` and `raw` from Cache table to key. 
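+
+        This is the inverse of `put`: a raw key is returned unchanged
+        (sqlite3.Binary is unwrapped back to bytes), while a non-raw
+        key is unpickled from the stored blob.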
+ + :param key: database key to convert + :param bool raw: flag indicating raw database storage + :return: corresponding Python key + + """ + # pylint: disable=no-self-use,unidiomatic-typecheck + if raw: + return BytesType(key) if type(key) is sqlite3.Binary else key + else: + return pickle.load(BytesIO(key)) + + + def store(self, value, read): + """Convert `value` to fields size, mode, filename, and value for Cache + table. + + :param value: value to convert + :param bool read: True when value is file-like object + :return: (size, mode, filename, value) tuple for Cache table + + """ + # pylint: disable=unidiomatic-typecheck + type_value = type(value) + min_file_size = self.min_file_size + + if ((type_value is TextType and len(value) < min_file_size) + or (type_value in INT_TYPES + and -9223372036854775808 <= value <= 9223372036854775807) + or (type_value is float)): + return 0, MODE_RAW, None, value + elif type_value is BytesType: + if len(value) < min_file_size: + return 0, MODE_RAW, None, sqlite3.Binary(value) + else: + filename, full_path = self.filename() + + with open(full_path, 'wb') as writer: + writer.write(value) + + return len(value), MODE_BINARY, filename, None + elif type_value is TextType: + filename, full_path = self.filename() + + with io_open(full_path, 'w', encoding='UTF-8') as writer: + writer.write(value) + + size = op.getsize(full_path) + return size, MODE_TEXT, filename, None + elif read: + size = 0 + reader = ft.partial(value.read, 2 ** 22) + filename, full_path = self.filename() + + with open(full_path, 'wb') as writer: + for chunk in iter(reader, b''): + size += len(chunk) + writer.write(chunk) + + return size, MODE_BINARY, filename, None + else: + result = pickle.dumps(value, protocol=self.pickle_protocol) + + if len(result) < min_file_size: + return 0, MODE_PICKLE, None, sqlite3.Binary(result) + else: + filename, full_path = self.filename() + + with open(full_path, 'wb') as writer: + writer.write(result) + + return len(result), MODE_PICKLE, filename, None + + + def fetch(self, mode, filename, value, read): + """Convert fields `mode`, `filename`, and `value` from Cache table to + value. + + :param int mode: value mode raw, binary, text, or pickle + :param str filename: filename of corresponding value + :param value: database value + :param bool read: when True, return an open file handle + :return: corresponding Python value + + """ + # pylint: disable=no-self-use,unidiomatic-typecheck + if mode == MODE_RAW: + return BytesType(value) if type(value) is sqlite3.Binary else value + elif mode == MODE_BINARY: + if read: + return open(op.join(self._directory, filename), 'rb') + else: + with open(op.join(self._directory, filename), 'rb') as reader: + return reader.read() + elif mode == MODE_TEXT: + full_path = op.join(self._directory, filename) + with io_open(full_path, 'r', encoding='UTF-8') as reader: + return reader.read() + elif mode == MODE_PICKLE: + if value is None: + with open(op.join(self._directory, filename), 'rb') as reader: + return pickle.load(reader) + else: + return pickle.load(BytesIO(value)) + + + def filename(self): + """Return filename and full-path tuple for file storage. + + Filename will be a randomly generated 28 character hexadecimal string + with ".val" suffixed. Two levels of sub-directories will be used to + reduce the size of directories. On older filesystems, lookups in + directories with many files are slow. 
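+
+        For example, a hypothetical random name "ab12cdef..." maps to the
+        relative path "ab/12/cdef....val".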
+ + """ + hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8') + sub_dir = op.join(hex_name[:2], hex_name[2:4]) + name = hex_name[4:] + '.val' + directory = op.join(self._directory, sub_dir) + + try: + os.makedirs(directory) + except OSError as error: + if error.errno != errno.EEXIST: + raise + + filename = op.join(sub_dir, name) + full_path = op.join(self._directory, filename) + return filename, full_path + + + def remove(self, filename): + """Remove a file given by `filename`. + + This method is cross-thread and cross-process safe. If an "error no + entry" occurs, it is suppressed. + + :param str filename: relative path to file + + """ + full_path = op.join(self._directory, filename) + + try: + os.remove(full_path) + except WindowsError: + pass + except OSError as error: + if error.errno != errno.ENOENT: + # ENOENT may occur if two caches attempt to delete the same + # file at the same time. + raise + + +class Timeout(Exception): + "Database timeout expired." + pass + + +class UnknownFileWarning(UserWarning): + "Warning used by Cache.check for unknown files." + pass + + +class EmptyDirWarning(UserWarning): + "Warning used by Cache.check for empty directories." + pass + + +class Cache(object): + "Disk and file backed cache." + # pylint: disable=bad-continuation + def __init__(self, directory, timeout=60, disk=Disk, **settings): + """Initialize cache instance. + + :param str directory: cache directory + :param float timeout: SQLite connection timeout + :param disk: Disk type or subclass for serialization + :param settings: any of DEFAULT_SETTINGS + + """ + try: + assert issubclass(disk, Disk) + except (TypeError, AssertionError): + raise ValueError('disk must subclass diskcache.Disk') + + self._directory = directory + self._timeout = 60 # Use 1 minute timeout for initialization. + self._local = threading.local() + + if not op.isdir(directory): + try: + os.makedirs(directory, 0o755) + except OSError as error: + if error.errno != errno.EEXIST: + raise EnvironmentError( + error.errno, + 'Cache directory "%s" does not exist' + ' and could not be created' % self._directory + ) + + sql = self._sql + + # Setup Settings table. + + sql('CREATE TABLE IF NOT EXISTS Settings (' + ' key TEXT NOT NULL UNIQUE,' + ' value)' + ) + + current_settings = dict(sql( + 'SELECT key, value FROM Settings' + ).fetchall()) + + sets = DEFAULT_SETTINGS.copy() + sets.update(current_settings) + sets.update(settings) + + for key in METADATA: + sets.pop(key, None) + + # Setup Disk object (must happen after settings initialized). + + kwargs = { + key[5:]: value for key, value in sets.items() + if key.startswith('disk_') + } + self._disk = disk(directory, **kwargs) + + # Set cached attributes: updates settings and sets pragmas. + + for key, value in sets.items(): + query = 'INSERT OR REPLACE INTO Settings VALUES (?, ?)' + sql(query, (key, value)) + self.reset(key, value) + + for key, value in METADATA.items(): + query = 'INSERT OR IGNORE INTO Settings VALUES (?, ?)' + sql(query, (key, value)) + self.reset(key) + + (self._page_size,), = sql('PRAGMA page_size').fetchall() + + # Setup Cache table. 
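+        # One row per item: (key, raw) identify it; store_time, expire_time,
+        # access_time and access_count drive eviction; size, mode, filename
+        # and value describe the payload (inline blob or side file).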
+ + sql('CREATE TABLE IF NOT EXISTS Cache (' + ' rowid INTEGER PRIMARY KEY,' + ' key BLOB,' + ' raw INTEGER,' + ' version INTEGER DEFAULT 0,' + ' store_time REAL,' + ' expire_time REAL,' + ' access_time REAL,' + ' access_count INTEGER DEFAULT 0,' + ' tag BLOB,' + ' size INTEGER DEFAULT 0,' + ' mode INTEGER DEFAULT 0,' + ' filename TEXT,' + ' value BLOB)' + ) + + sql('CREATE UNIQUE INDEX IF NOT EXISTS Cache_key_raw ON' + ' Cache(key, raw)' + ) + + sql('CREATE INDEX IF NOT EXISTS Cache_expire_time ON' + ' Cache (expire_time)' + ) + + query = EVICTION_POLICY[self.eviction_policy]['init'] + + if query is not None: + sql(query) + + # Use triggers to keep Metadata updated. + + sql('CREATE TRIGGER IF NOT EXISTS Settings_count_insert' + ' AFTER INSERT ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value + 1' + ' WHERE key = "count"; END' + ) + + sql('CREATE TRIGGER IF NOT EXISTS Settings_count_delete' + ' AFTER DELETE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value - 1' + ' WHERE key = "count"; END' + ) + + sql('CREATE TRIGGER IF NOT EXISTS Settings_size_insert' + ' AFTER INSERT ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value + NEW.size' + ' WHERE key = "size"; END' + ) + + sql('CREATE TRIGGER IF NOT EXISTS Settings_size_update' + ' AFTER UPDATE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings' + ' SET value = value + NEW.size - OLD.size' + ' WHERE key = "size"; END' + ) + + sql('CREATE TRIGGER IF NOT EXISTS Settings_size_delete' + ' AFTER DELETE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value - OLD.size' + ' WHERE key = "size"; END' + ) + + # Create tag index if requested. + + if self.tag_index: # pylint: disable=no-member + self.create_tag_index() + else: + self.drop_tag_index() + + # Close and re-open database connection with given timeout. + + self.close() + self._timeout = timeout + assert self._sql + + + @property + def directory(self): + """Cache directory.""" + return self._directory + + + @property + def timeout(self): + """SQLite connection timeout value in seconds.""" + return self._timeout + + + @property + def disk(self): + """Disk used for serialization.""" + return self._disk + + + @property + def _sql(self): + con = getattr(self._local, 'con', None) + + if con is None: + con = self._local.con = sqlite3.connect( + op.join(self._directory, DBNAME), + timeout=self._timeout, + isolation_level=None, + ) + + return con.execute + + + @cl.contextmanager + def _transact(self, filename=None): + sql = self._sql + filenames = [] + _disk_remove = self._disk.remove + + try: + sql('BEGIN IMMEDIATE') + except sqlite3.OperationalError: + if filename is not None: + _disk_remove(filename) + raise Timeout + + try: + yield sql, filenames.append + except BaseException: + sql('ROLLBACK') + raise + else: + sql('COMMIT') + for filename in filenames: + if filename is not None: + _disk_remove(filename) + + + def set(self, key, value, expire=None, read=False, tag=None): + """Set `key` and `value` item in cache. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. 
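+
+        A minimal usage sketch (assumes a writable cache directory; not
+        part of the upstream documentation):
+
+        >>> cache = Cache('/tmp/diskcache')
+        >>> cache.set('answer', 42, expire=60, tag='demo')
+        True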
+ + :param key: key for item + :param value: value for item + :param float expire: seconds until item expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :return: True if item was set + :raises Timeout: if database timeout expires + + """ + now = time.time() + db_key, raw = self._disk.put(key) + expire_time = None if expire is None else now + expire + size, mode, filename, db_value = self._disk.store(value, read) + columns = (expire_time, tag, size, mode, filename, db_value) + + # The order of SELECT, UPDATE, and INSERT is important below. + # + # Typical cache usage pattern is: + # + # value = cache.get(key) + # if value is None: + # value = expensive_calculation() + # cache.set(key, value) + # + # Cache.get does not evict expired keys to avoid writes during lookups. + # Commonly used/expired keys will therefore remain in the cache making + # an UPDATE the preferred path. + # + # The alternative is to assume the key is not present by first trying + # to INSERT and then handling the IntegrityError that occurs from + # violating the UNIQUE constraint. This optimistic approach was + # rejected based on the common cache usage pattern. + # + # INSERT OR REPLACE aka UPSERT is not used because the old filename may + # need cleanup. + + with self._transact(filename) as (sql, cleanup): + rows = sql( + 'SELECT rowid, filename FROM Cache' + ' WHERE key = ? AND raw = ?', + (db_key, raw), + ).fetchall() + + if rows: + (rowid, old_filename), = rows + cleanup(old_filename) + self._row_update(rowid, now, columns) + else: + self._row_insert(db_key, raw, now, columns) + + self._cull(now, sql, cleanup) + + return True + + + __setitem__ = set + + + def _row_update(self, rowid, now, columns): + sql = self._sql + expire_time, tag, size, mode, filename, value = columns + sql('UPDATE Cache SET' + ' version = ?,' + ' store_time = ?,' + ' expire_time = ?,' + ' access_time = ?,' + ' access_count = ?,' + ' tag = ?,' + ' size = ?,' + ' mode = ?,' + ' filename = ?,' + ' value = ?' + ' WHERE rowid = ?', ( + 0, # version + now, # store_time + expire_time, + now, # access_time + 0, # access_count + tag, + size, + mode, + filename, + value, + rowid, + ), + ) + + + def _row_insert(self, key, raw, now, columns): + sql = self._sql + expire_time, tag, size, mode, filename, value = columns + sql('INSERT INTO Cache(' + ' key, raw, version, store_time, expire_time, access_time,' + ' access_count, tag, size, mode, filename, value' + ') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', ( + key, + raw, + 0, # version + now, # store_time + expire_time, + now, # access_time + 0, # access_count + tag, + size, + mode, + filename, + value, + ), + ) + + + def _cull(self, now, sql, cleanup): + cull_limit = self.cull_limit + + if cull_limit == 0: + return + + # Evict expired keys. + + select_expired_template = ( + 'SELECT %s FROM Cache' + ' WHERE expire_time IS NOT NULL AND expire_time < ?' + ' ORDER BY expire_time LIMIT ?' + ) + + select_expired = select_expired_template % 'filename' + rows = sql(select_expired, (now, cull_limit)).fetchall() + + if rows: + delete_expired = ( + 'DELETE FROM Cache WHERE rowid IN (%s)' + % (select_expired_template % 'rowid') + ) + sql(delete_expired, (now, cull_limit)) + + for filename, in rows: + cleanup(filename) + + cull_limit -= len(rows) + + if cull_limit == 0: + return + + # Evict keys by policy. 
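+        # When the cache volume exceeds size_limit, the policy's cull query
+        # selects victims ordered by store_time, access_time or access_count.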
+
+        select_policy_template = EVICTION_POLICY[self.eviction_policy]['cull']
+
+        if select_policy_template is None or self.volume() < self.size_limit:
+            return
+
+        select_policy = select_policy_template % 'filename'
+
+        rows = sql(select_policy, (cull_limit,)).fetchall()
+
+        if rows:
+            delete_policy = (
+                'DELETE FROM Cache WHERE rowid IN (%s)'
+                % (select_policy_template % 'rowid')
+            )
+            sql(delete_policy, (cull_limit,))
+
+            for filename, in rows:
+                cleanup(filename)
+
+
+    def add(self, key, value, expire=None, read=False, tag=None):
+        """Add `key` and `value` item to cache.
+
+        Similar to `set`, but only add to cache if key not present.
+
+        Operation is atomic. Only one concurrent add operation for a given key
+        will succeed.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        :param key: key for item
+        :param value: value for item
+        :param float expire: seconds until the key expires
+            (default None, no expiry)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :return: True if item was added
+        :raises Timeout: if database timeout expires
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        expire_time = None if expire is None else now + expire
+        size, mode, filename, db_value = self._disk.store(value, read)
+        columns = (expire_time, tag, size, mode, filename, db_value)
+
+        with self._transact(filename) as (sql, cleanup):
+            rows = sql(
+                'SELECT rowid, filename, expire_time FROM Cache'
+                ' WHERE key = ? AND raw = ?',
+                (db_key, raw),
+            ).fetchall()
+
+            if rows:
+                (rowid, old_filename, old_expire_time), = rows
+
+                if old_expire_time is None or old_expire_time > now:
+                    cleanup(filename)
+                    return False
+
+                cleanup(old_filename)
+                self._row_update(rowid, now, columns)
+            else:
+                self._row_insert(db_key, raw, now, columns)
+
+            self._cull(now, sql, cleanup)
+
+            return True
+
+
+    def incr(self, key, delta=1, default=0):
+        """Increment value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent increment operations will be
+        counted individually.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        :param key: key for item
+        :param int delta: amount to increment (default 1)
+        :param int default: value if key is missing (default 0)
+        :return: new value for item
+        :raises KeyError: if key is not found and default is None
+        :raises Timeout: if database timeout expires
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        select = (
+            'SELECT rowid, expire_time, filename, value FROM Cache'
+            ' WHERE key = ? AND raw = ?'
+ ) + + with self._transact() as (sql, cleanup): + rows = sql(select, (db_key, raw)).fetchall() + + if not rows: + if default is None: + raise KeyError(key) + + value = default + delta + columns = (None, None) + self._disk.store(value, False) + self._row_insert(db_key, raw, now, columns) + self._cull(now, sql, cleanup) + return value + + (rowid, expire_time, filename, value), = rows + + if expire_time is not None and expire_time < now: + if default is None: + raise KeyError(key) + + value = default + delta + columns = (None, None) + self._disk.store(value, False) + self._row_update(rowid, now, columns) + self._cull(now, sql, cleanup) + cleanup(filename) + return value + + value += delta + + columns = 'store_time = ?, value = ?' + update_column = EVICTION_POLICY[self.eviction_policy]['get'] + columns += '' if update_column is None else ', ' + update_column + update = 'UPDATE Cache SET %s WHERE rowid = ?' % columns + sql(update, (now, value, rowid)) + + return value + + + def decr(self, key, delta=1, default=0): + """Decrement value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent decrement operations will be + counted individually. + + Unlike Memcached, negative values are supported. Value may be + decremented below zero. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + :param key: key for item + :param int delta: amount to decrement (default 1) + :param int default: value if key is missing (default 0) + :return: new value for item + :raises KeyError: if key is not found and default is None + :raises Timeout: if database timeout expires + + """ + return self.incr(key, -delta, default) + + + def get(self, key, default=None, read=False, expire_time=False, tag=False): + """Retrieve value from cache. If `key` is missing, return `default`. + + :param key: key for item + :param default: value to return if key is missing (default None) + :param bool read: if True, return file handle to value + (default False) + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :return: value for item or default if key not found + :raises Timeout: if database timeout expires + + """ + db_key, raw = self._disk.put(key) + update_column = EVICTION_POLICY[self.eviction_policy]['get'] + update = 'UPDATE Cache SET %s WHERE rowid = ?' + select = ( + 'SELECT rowid, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + if expire_time and tag: + default = (default, None, None) + elif expire_time or tag: + default = (default, None) + + if not self.statistics and update_column is None: + # Fast path, no transaction necessary. + + rows = self._sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + return default + + (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows + + try: + value = self._disk.fetch(mode, filename, db_value, read) + except IOError as error: + if error.errno == errno.ENOENT: + # Key was deleted before we could retrieve result. + return default + else: + raise + + else: # Slow path, transaction required. 
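+            # Statistics and read-updating eviction policies write on every
+            # lookup, so the hit/miss counters and access columns must be
+            # updated inside a transaction.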
+ + cache_hit = ( + 'UPDATE Settings SET value = value + 1 WHERE key = "hits"' + ) + cache_miss = ( + 'UPDATE Settings SET value = value + 1 WHERE key = "misses"' + ) + + with self._transact() as (sql, _): + rows = sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + if self.statistics: + sql(cache_miss) + return default + + (rowid, db_expire_time, db_tag, + mode, filename, db_value), = rows + + try: + value = self._disk.fetch(mode, filename, db_value, read) + except IOError as error: + if error.errno == errno.ENOENT: + # Key was deleted before we could retrieve result. + if self.statistics: + sql(cache_miss) + return default + else: + raise + + if self.statistics: + sql(cache_hit) + + if update_column is not None: + sql(update % update_column, (rowid,)) + + if expire_time and tag: + return (value, db_expire_time, db_tag) + elif expire_time: + return (value, db_expire_time) + elif tag: + return (value, db_tag) + else: + return value + + + def __getitem__(self, key): + """Return corresponding value for `key` from cache. + + :param key: key matching item + :return: corresponding value + :raises KeyError: if key is not found + :raises Timeout: if database timeout expires + + """ + value = self.get(key, default=ENOVAL) + if value is ENOVAL: + raise KeyError(key) + return value + + + def read(self, key): + """Return file handle value corresponding to `key` from cache. + + :param key: key matching item + :return: file open for reading in binary mode + :raises KeyError: if key is not found + :raises Timeout: if database timeout expires + + """ + handle = self.get(key, default=ENOVAL, read=True) + if handle is ENOVAL: + raise KeyError(key) + return handle + + + def __contains__(self, key): + """Return `True` if `key` matching item is found in cache. + + :param key: key matching item + :return: True if key matching item + + """ + sql = self._sql + db_key, raw = self._disk.put(key) + select = ( + 'SELECT rowid FROM Cache' + ' WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + rows = sql(select, (db_key, raw, time.time())).fetchall() + + return bool(rows) + + + def pop(self, key, default=None, expire_time=False, tag=False): + """Remove corresponding item for `key` from cache and return value. + + If `key` is missing, return `default`. + + Operation is atomic. Concurrent operations will be serialized. + + :param key: key for item + :param default: value to return if key is missing (default None) + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :return: value for item or default if key not found + :raises Timeout: if database timeout expires + + """ + db_key, raw = self._disk.put(key) + select = ( + 'SELECT rowid, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + if expire_time and tag: + default = default, None, None + elif expire_time or tag: + default = default, None + + with self._transact() as (sql, _): + rows = sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + return default + + (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows + + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + + try: + value = self._disk.fetch(mode, filename, db_value, False) + except IOError as error: + if error.errno == errno.ENOENT: + # Key was deleted before we could retrieve result. 
+ return default + else: + raise + finally: + if filename is not None: + self._disk.remove(filename) + + if expire_time and tag: + return value, db_expire_time, db_tag + elif expire_time: + return value, db_expire_time + elif tag: + return value, db_tag + else: + return value + + + def __delitem__(self, key): + """Delete corresponding item for `key` from cache. + + :param key: key matching item + :raises KeyError: if key is not found + :raises Timeout: if database timeout expires + + """ + db_key, raw = self._disk.put(key) + + with self._transact() as (sql, cleanup): + rows = sql( + 'SELECT rowid, filename FROM Cache' + ' WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)', + (db_key, raw, time.time()), + ).fetchall() + + if not rows: + raise KeyError(key) + + (rowid, filename), = rows + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + cleanup(filename) + + return True + + + def delete(self, key): + """Delete corresponding item for `key` from cache. + + Missing keys are ignored. + + :param key: key matching item + :return: True if item was deleted + :raises Timeout: if database timeout expires + + """ + try: + return self.__delitem__(key) + except KeyError: + return False + + + def push(self, value, prefix=None, side='back', expire=None, read=False, + tag=None): + """Push `value` onto `side` of queue identified by `prefix` in cache. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + Defaults to pushing value on back of queue. Set side to 'front' to push + value on front of queue. Side must be one of 'back' or 'front'. + + Operation is atomic. Concurrent operations will be serialized. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + See also `Cache.pull`. + + >>> cache = Cache('/tmp/test') + >>> _ = cache.clear() + >>> print(cache.push('first value')) + 500000000000000 + >>> cache.get(500000000000000) + 'first value' + >>> print(cache.push('second value')) + 500000000000001 + >>> print(cache.push('third value', side='front')) + 499999999999999 + >>> cache.push(1234, prefix='userids') + 'userids-500000000000000' + + :param value: value for item + :param str prefix: key prefix (default None, key is integer) + :param str side: either 'back' or 'front' (default 'back') + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :return: key for item in cache + :raises Timeout: if database timeout expires + + """ + if prefix is None: + min_key = 0 + max_key = 999999999999999 + else: + min_key = prefix + '-000000000000000' + max_key = prefix + '-999999999999999' + + now = time.time() + raw = True + expire_time = None if expire is None else now + expire + size, mode, filename, db_value = self._disk.store(value, read) + columns = (expire_time, tag, size, mode, filename, db_value) + order = {'back': 'DESC', 'front': 'ASC'} + select = ( + 'SELECT key FROM Cache' + ' WHERE ? < key AND key < ? AND raw = ?' 
+ ' ORDER BY key %s LIMIT 1' + ) % order[side] + + with self._transact(filename) as (sql, cleanup): + rows = sql(select, (min_key, max_key, raw)).fetchall() + + if rows: + (key,), = rows + + if prefix is not None: + num = int(key[(key.rfind('-') + 1):]) + else: + num = key + + if side == 'back': + num += 1 + else: + assert side == 'front' + num -= 1 + else: + num = 500000000000000 + + if prefix is not None: + db_key = '{0}-{1:015d}'.format(prefix, num) + else: + db_key = num + + self._row_insert(db_key, raw, now, columns) + self._cull(now, sql, cleanup) + + return db_key + + + def pull(self, prefix=None, default=(None, None), side='front', + expire_time=False, tag=False): + """Pull key and value item pair from `side` of queue in cache. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + If queue is empty, return default. + + Defaults to pulling key and value item pairs from front of queue. Set + side to 'back' to pull from back of queue. Side must be one of 'front' + or 'back'. + + Operation is atomic. Concurrent operations will be serialized. + + See also `Cache.push` and `Cache.get`. + + >>> cache = Cache('/tmp/test') + >>> _ = cache.clear() + >>> cache.pull() + (None, None) + >>> for letter in 'abc': + ... print(cache.push(letter)) + 500000000000000 + 500000000000001 + 500000000000002 + >>> key, value = cache.pull() + >>> print(key) + 500000000000000 + >>> value + 'a' + >>> _, value = cache.pull(side='back') + >>> value + 'c' + >>> cache.push(1234, 'userids') + 'userids-500000000000000' + >>> _, value = cache.pull('userids') + >>> value + 1234 + + :param str prefix: key prefix (default None, key is integer) + :param default: value to return if key is missing + (default (None, None)) + :param str side: either 'front' or 'back' (default 'front') + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :return: key and value item pair or default if queue is empty + :raises Timeout: if database timeout expires + + """ + if prefix is None: + min_key = 0 + max_key = 999999999999999 + else: + min_key = prefix + '-000000000000000' + max_key = prefix + '-999999999999999' + + order = {'front': 'ASC', 'back': 'DESC'} + select = ( + 'SELECT rowid, key, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE ? < key AND key < ? AND raw = 1' + ' ORDER BY key %s LIMIT 1' + ) % order[side] + + if expire_time and tag: + default = default, None, None + elif expire_time or tag: + default = default, None + + while True: + with self._transact() as (sql, cleanup): + rows = sql(select, (min_key, max_key)).fetchall() + + if not rows: + return default + + (rowid, key, db_expire, db_tag, mode, name, db_value), = rows + + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + + if db_expire is not None and db_expire < time.time(): + cleanup(name) + else: + break + + try: + value = self._disk.fetch(mode, name, db_value, False) + except IOError as error: + if error.errno == errno.ENOENT: + # Key was deleted before we could retrieve result. + return default + else: + raise + finally: + if name is not None: + self._disk.remove(name) + + if expire_time and tag: + return (key, value), db_expire, db_tag + elif expire_time: + return (key, value), db_expire + elif tag: + return (key, value), db_tag + else: + return key, value + + + def check(self, fix=False): + """Check database and file system consistency. 
+ + Intended for use in testing and post-mortem error analysis. + + While checking the Cache table for consistency, a writer lock is held + on the database. The lock blocks other cache clients from writing to + the database. For caches with many file references, the lock may be + held for a long time. For example, local benchmarking shows that a + cache with 1,000 file references takes ~60ms to check. + + :param bool fix: correct inconsistencies + :return: list of warnings + :raises Timeout: if database timeout expires + + """ + # pylint: disable=access-member-before-definition,W0201 + with warnings.catch_warnings(record=True) as warns: + sql = self._sql + + # Check integrity of database. + + rows = sql('PRAGMA integrity_check').fetchall() + + if len(rows) != 1 or rows[0][0] != u'ok': + for message, in rows: + warnings.warn(message) + + if fix: + sql('VACUUM') + + with self._transact() as (sql, _): + + # Check Cache.filename against file system. + + filenames = set() + select = ( + 'SELECT rowid, size, filename FROM Cache' + ' WHERE filename IS NOT NULL' + ) + + rows = sql(select).fetchall() + + for rowid, size, filename in rows: + full_path = op.join(self._directory, filename) + filenames.add(full_path) + + if op.exists(full_path): + real_size = op.getsize(full_path) + + if size != real_size: + message = 'wrong file size: %s, %d != %d' + args = full_path, real_size, size + warnings.warn(message % args) + + if fix: + sql('UPDATE Cache SET size = ?' + ' WHERE rowid = ?', + (real_size, rowid), + ) + + continue + + warnings.warn('file not found: %s' % full_path) + + if fix: + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + + # Check file system against Cache.filename. + + for dirpath, _, files in os.walk(self._directory): + paths = [op.join(dirpath, filename) for filename in files] + error = set(paths) - filenames + + for full_path in error: + if DBNAME in full_path: + continue + + message = 'unknown file: %s' % full_path + warnings.warn(message, UnknownFileWarning) + + if fix: + os.remove(full_path) + + # Check for empty directories. + + for dirpath, dirs, files in os.walk(self._directory): + if not (dirs or files): + message = 'empty directory: %s' % dirpath + warnings.warn(message, EmptyDirWarning) + + if fix: + os.rmdir(dirpath) + + # Check Settings.count against count of Cache rows. + + self.reset('count') + (count,), = sql('SELECT COUNT(key) FROM Cache').fetchall() + + if self.count != count: + message = 'Settings.count != COUNT(Cache.key); %d != %d' + warnings.warn(message % (self.count, count)) + + if fix: + sql('UPDATE Settings SET value = ? WHERE key = ?', + (count, 'count'), + ) + + # Check Settings.size against sum of Cache.size column. + + self.reset('size') + select_size = 'SELECT COALESCE(SUM(size), 0) FROM Cache' + (size,), = sql(select_size).fetchall() + + if self.size != size: + message = 'Settings.size != SUM(Cache.size); %d != %d' + warnings.warn(message % (self.size, size)) + + if fix: + sql('UPDATE Settings SET value = ? WHERE key =?', + (size, 'size'), + ) + + return warns + + + def create_tag_index(self): + """Create tag index on cache database. + + It is better to initialize cache with `tag_index=True` than use this. + + :raises Timeout: if database timeout expires + + """ + sql = self._sql + sql('CREATE INDEX IF NOT EXISTS Cache_tag_rowid ON Cache(tag, rowid)') + self.reset('tag_index', 1) + + + def drop_tag_index(self): + """Drop tag index on cache database. 
+ + :raises Timeout: if database timeout expires + + """ + sql = self._sql + sql('DROP INDEX IF EXISTS Cache_tag_rowid') + self.reset('tag_index', 0) + + + def evict(self, tag): + """Remove items with matching `tag` from cache. + + Removing items is an iterative process. In each iteration, a subset of + items is removed. Concurrent writes may occur between iterations. + + If a :exc:`Timeout` occurs, the first element of the exception's + `args` attribute will be the number of items removed before the + exception occurred. + + :param str tag: tag identifying items + :return: count of rows removed + :raises Timeout: if database timeout expires + + """ + select = ( + 'SELECT rowid, filename FROM Cache' + ' WHERE tag = ? AND rowid > ?' + ' ORDER BY rowid LIMIT ?' + ) + args = [tag, 0, 100] + return self._select_delete(select, args, arg_index=1) + + + def expire(self, now=None): + """Remove expired items from cache. + + Removing items is an iterative process. In each iteration, a subset of + items is removed. Concurrent writes may occur between iterations. + + If a :exc:`Timeout` occurs, the first element of the exception's + `args` attribute will be the number of items removed before the + exception occurred. + + :param float now: current time (default None, ``time.time()`` used) + :return: count of items removed + :raises Timeout: if database timeout expires + + """ + select = ( + 'SELECT rowid, expire_time, filename FROM Cache' + ' WHERE ? < expire_time AND expire_time < ?' + ' ORDER BY expire_time LIMIT ?' + ) + args = [0, now or time.time(), 100] + return self._select_delete(select, args, row_index=1) + + + def clear(self): + """Remove all items from cache. + + Removing items is an iterative process. In each iteration, a subset of + items is removed. Concurrent writes may occur between iterations. + + If a :exc:`Timeout` occurs, the first element of the exception's + `args` attribute will be the number of items removed before the + exception occurred. + + :return: count of rows removed + :raises Timeout: if database timeout expires + + """ + select = ( + 'SELECT rowid, filename FROM Cache' + ' WHERE rowid > ?' + ' ORDER BY rowid LIMIT ?' + ) + args = [0, 100] + return self._select_delete(select, args) + + + def _select_delete(self, select, args, row_index=0, arg_index=0): + count = 0 + delete = 'DELETE FROM Cache WHERE rowid IN (%s)' + + try: + while True: + with self._transact() as (sql, cleanup): + rows = sql(select, args).fetchall() + + if not rows: + break + + count += len(rows) + sql(delete % ','.join(str(row[0]) for row in rows)) + + for row in rows: + args[arg_index] = row[row_index] + cleanup(row[-1]) + + except Timeout: + raise Timeout(count) + + return count + + + def iterkeys(self, reverse=False): + """Iterate Cache keys in database sort order. + + >>> cache = Cache('/tmp/diskcache') + >>> _ = cache.clear() + >>> for key in [4, 1, 3, 0, 2]: + ... cache[key] = key + >>> list(cache.iterkeys()) + [0, 1, 2, 3, 4] + >>> list(cache.iterkeys(reverse=True)) + [4, 3, 2, 1, 0] + + :param bool reverse: reverse sort order (default False) + :return: iterator of Cache keys + + """ + sql = self._sql + limit = 100 + _disk_get = self._disk.get + + if reverse: + select = ( + 'SELECT key, raw FROM Cache' + ' ORDER BY key DESC, raw DESC LIMIT 1' + ) + iterate = ( + 'SELECT key, raw FROM Cache' + ' WHERE key = ? AND raw < ? OR key < ?' + ' ORDER BY key DESC, raw DESC LIMIT ?' 
+ ) + else: + select = ( + 'SELECT key, raw FROM Cache' + ' ORDER BY key ASC, raw ASC LIMIT 1' + ) + iterate = ( + 'SELECT key, raw FROM Cache' + ' WHERE key = ? AND raw > ? OR key > ?' + ' ORDER BY key ASC, raw ASC LIMIT ?' + ) + + row = sql(select).fetchall() + + if row: + (key, raw), = row + else: + return + + yield _disk_get(key, raw) + + while True: + rows = sql(iterate, (key, raw, key, limit)).fetchall() + + if not rows: + break + + for key, raw in rows: + yield _disk_get(key, raw) + + + def _iter(self, ascending=True): + sql = self._sql + rows = sql('SELECT MAX(rowid) FROM Cache').fetchall() + (max_rowid,), = rows + yield # Signal ready. + + if max_rowid is None: + return + + bound = max_rowid + 1 + limit = 100 + _disk_get = self._disk.get + rowid = 0 if ascending else bound + select = ( + 'SELECT rowid, key, raw FROM Cache' + ' WHERE ? < rowid AND rowid < ?' + ' ORDER BY rowid %s LIMIT ?' + ) % ('ASC' if ascending else 'DESC') + + while True: + if ascending: + args = (rowid, bound, limit) + else: + args = (0, rowid, limit) + + rows = sql(select, args).fetchall() + + if not rows: + break + + for rowid, key, raw in rows: + yield _disk_get(key, raw) + + + def __iter__(self): + "Iterate keys in cache including expired items." + iterator = self._iter() + next(iterator) + return iterator + + + def __reversed__(self): + "Reverse iterate keys in cache including expired items." + iterator = self._iter(ascending=False) + next(iterator) + return iterator + + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + # pylint: disable=E0203,W0201 + result = (self.reset('hits'), self.reset('misses')) + + if reset: + self.reset('hits', 0) + self.reset('misses', 0) + + self.reset('statistics', enable) + + return result + + + def volume(self): + """Return estimated total size of cache on disk. + + :return: size in bytes + + """ + (page_count,), = self._sql('PRAGMA page_count').fetchall() + total_size = self._page_size * page_count + self.reset('size') + return total_size + + + def close(self): + """Close database connection. + + """ + con = getattr(self._local, 'con', None) + + if con is None: + return + + con.close() + + try: + delattr(self._local, 'con') + except AttributeError: + pass + + + def __enter__(self): + return self + + + def __exit__(self, *exception): + self.close() + + + def __len__(self): + "Count of items in cache including expired items." + return self.reset('count') + + + def __getstate__(self): + return (self.directory, self.timeout, type(self.disk)) + + + def __setstate__(self, state): + self.__init__(*state) + + + def reset(self, key, value=ENOVAL): + """Reset `key` and `value` item from Settings table. + + If `value` is not given, it is reloaded from the Settings + table. Otherwise, the Settings table is updated. + + Settings attributes on cache objects are lazy-loaded and + read-only. Use `reset` to update the value. + + Settings with the ``disk_`` prefix correspond to Disk + attributes. Updating the value will change the unprefixed attribute on + the associated Disk instance. + + Settings with the ``sqlite_`` prefix correspond to SQLite + pragmas. Updating the value will execute the corresponding PRAGMA + statement. 
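+
+        For example (a sketch, not part of the upstream documentation):
+
+        >>> cache = Cache('/tmp/diskcache')
+        >>> cache.reset('cull_limit', 0)  # disable automatic culling
+        0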
+ + :param str key: Settings key for item + :param value: value for item (optional) + :return: updated value for item + :raises Timeout: if database timeout expires + + """ + if value is ENOVAL: + select = 'SELECT value FROM Settings WHERE key = ?' + (value,), = self._sql(select, (key,)).fetchall() + setattr(self, key, value) + return value + else: + with self._transact() as (sql, _): + update = 'UPDATE Settings SET value = ? WHERE key = ?' + sql(update, (value, key)) + + if key.startswith('sqlite_'): + + # 2016-02-17 GrantJ - PRAGMA and autocommit_level=None + # don't always play nicely together. Retry setting the + # PRAGMA. I think some PRAGMA statements expect to + # immediately take an EXCLUSIVE lock on the database. I + # can't find any documentation for this but without the + # retry, stress will intermittently fail with multiple + # processes. + + pause = 0.001 + count = 60000 # 60 / 0.001 + error = sqlite3.OperationalError + pragma = key[7:] + + for _ in range(count): + try: + args = pragma, value + sql('PRAGMA %s = %s' % args).fetchall() + except sqlite3.OperationalError as exc: + error = exc + time.sleep(pause) + else: + break + else: + raise error + + del error + + elif key.startswith('disk_'): + attr = key[5:] + setattr(self._disk, attr, value) + + setattr(self, key, value) + return value diff --git a/ext/diskcache/djangocache.py b/ext/diskcache/djangocache.py new file mode 100644 index 0000000000..99cd739501 --- /dev/null +++ b/ext/diskcache/djangocache.py @@ -0,0 +1,321 @@ +"Django-compatible disk and file backed cache." + +from django.core.cache.backends.base import BaseCache + +try: + from django.core.cache.backends.base import DEFAULT_TIMEOUT +except ImportError: + # For older versions of Django simply use 300 seconds. + DEFAULT_TIMEOUT = 300 + +from .fanout import FanoutCache + + +class DjangoCache(BaseCache): + "Django-compatible disk and file backed cache." + def __init__(self, directory, params): + """Initialize DjangoCache instance. + + :param str directory: cache directory + :param dict params: cache parameters + + """ + super(DjangoCache, self).__init__(params) + shards = params.get('SHARDS', 8) + timeout = params.get('DATABASE_TIMEOUT', 0.010) + options = params.get('OPTIONS', {}) + self._directory = directory + self._cache = FanoutCache(directory, shards, timeout, **options) + self.memoize = self._cache.memoize + + + @property + def directory(self): + """Cache directory.""" + return self._directory + + + def deque(self, name): + """Return Deque with given `name` in subdirectory. + + :param str name: subdirectory name for Deque + :return: Deque with given name + + """ + return self._cache.deque(name) + + + def index(self, name): + """Return Index with given `name` in subdirectory. + + :param str name: subdirectory name for Index + :return: Index with given name + + """ + return self._cache.index(name) + + + def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, + read=False, tag=None, retry=True): + """Set a value in the cache if the key does not already exist. If + timeout is given, that timeout will be used for the key; otherwise the + default cache timeout will be used. + + Return True if the value was stored, False otherwise. 
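+
+        A rough sketch of use through Django's cache API (assumes this
+        backend is the configured default cache; not from the upstream
+        documentation)::
+
+            from django.core.cache import cache
+            cache.add('user:1', 'alice')  # True, key was absent
+            cache.add('user:1', 'bob')    # False, key already present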
+
+        :param key: key for item
+        :param value: value for item
+        :param float timeout: seconds until the item expires
+            (default 300 seconds)
+        :param int version: key version number (default None, cache parameter)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: True if item was added
+
+        """
+        # pylint: disable=arguments-differ
+        key = self.make_key(key, version=version)
+        timeout = self.get_backend_timeout(timeout=timeout)
+        return self._cache.add(key, value, timeout, read, tag, retry)
+
+
+    def get(self, key, default=None, version=None, read=False,
+            expire_time=False, tag=False, retry=False):
+        """Fetch a given key from the cache. If the key does not exist, return
+        default, which itself defaults to None.
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param int version: key version number (default None, cache parameter)
+        :param bool read: if True, return file handle to value
+            (default False)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: value for item if key is found else default
+
+        """
+        # pylint: disable=arguments-differ
+        key = self.make_key(key, version=version)
+        return self._cache.get(key, default, read, expire_time, tag, retry)
+
+
+    def read(self, key, version=None):
+        """Return file handle corresponding to `key` from Cache.
+
+        :param key: Python key to retrieve
+        :param int version: key version number (default None, cache parameter)
+        :return: file open for reading in binary mode
+        :raises KeyError: if key is not found
+
+        """
+        key = self.make_key(key, version=version)
+        return self._cache.read(key)
+
+
+    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
+            read=False, tag=None, retry=True):
+        """Set a value in the cache. If timeout is given, that timeout will be
+        used for the key; otherwise the default cache timeout will be used.
+
+        :param key: key for item
+        :param value: value for item
+        :param float timeout: seconds until the item expires
+            (default 300 seconds)
+        :param int version: key version number (default None, cache parameter)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: True if item was set
+
+        """
+        # pylint: disable=arguments-differ
+        key = self.make_key(key, version=version)
+        timeout = self.get_backend_timeout(timeout=timeout)
+        return self._cache.set(key, value, timeout, read, tag, retry)
+
+
+    def pop(self, key, default=None, version=None, expire_time=False,
+            tag=False, retry=True):
+        """Remove corresponding item for `key` from cache and return value.
+
+        If `key` is missing, return `default`.
+
+        Operation is atomic. Concurrent operations will be serialized.
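+
+        For example (illustrative only; not from the upstream
+        documentation)::
+
+            from django.core.cache import cache
+            cache.set('token', 'abc123')
+            value = cache.pop('token')  # 'abc123'; the key is removed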
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param int version: key version number (default None, cache parameter)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: value for item if key is found else default
+
+        """
+        key = self.make_key(key, version=version)
+        return self._cache.pop(key, default, expire_time, tag, retry)
+
+
+    def delete(self, key, version=None, retry=True):
+        """Delete a key from the cache, failing silently.
+
+        :param key: key for item
+        :param int version: key version number (default None, cache parameter)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: True if item was deleted
+
+        """
+        # pylint: disable=arguments-differ
+        key = self.make_key(key, version=version)
+        return self._cache.delete(key, retry)
+
+
+    def incr(self, key, delta=1, version=None, default=None, retry=True):
+        """Increment value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent increment operations will be
+        counted individually.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        :param key: key for item
+        :param int delta: amount to increment (default 1)
+        :param int version: key version number (default None, cache parameter)
+        :param int default: value if key is missing (default None)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: new value for item on success else None
+        :raises ValueError: if key is not found and default is None
+
+        """
+        # pylint: disable=arguments-differ
+        key = self.make_key(key, version=version)
+        try:
+            return self._cache.incr(key, delta, default, retry)
+        except KeyError:
+            raise ValueError("Key '%s' not found" % key)
+
+
+    def decr(self, key, delta=1, version=None, default=None, retry=True):
+        """Decrement value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent decrement operations will be
+        counted individually.
+
+        Unlike Memcached, negative values are supported. Value may be
+        decremented below zero.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        :param key: key for item
+        :param int delta: amount to decrement (default 1)
+        :param int version: key version number (default None, cache parameter)
+        :param int default: value if key is missing (default None)
+        :param bool retry: retry if database timeout expires (default True)
+        :return: new value for item on success else None
+        :raises ValueError: if key is not found and default is None
+
+        """
+        # pylint: disable=arguments-differ
+        return self.incr(key, -delta, version, default, retry)
+
+
+    def has_key(self, key, version=None):
+        """Returns True if the key is in the cache and has not expired.
+ + :param key: key for item + :param int version: key version number (default None, cache parameter) + :return: True if key is found + + """ + key = self.make_key(key, version=version) + return key in self._cache + + + def expire(self): + """Remove expired items from cache. + + :return: count of items removed + + """ + return self._cache.expire() + + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + return self._cache.stats(enable=enable, reset=reset) + + + def create_tag_index(self): + """Create tag index on cache database. + + It is better to initialize cache with `tag_index=True` than use this. + + :raises Timeout: if database timeout expires + + """ + self._cache.create_tag_index() + + + def drop_tag_index(self): + """Drop tag index on cache database. + + :raises Timeout: if database timeout expires + + """ + self._cache.drop_tag_index() + + + def evict(self, tag): + """Remove items with matching `tag` from cache. + + :param str tag: tag identifying items + :return: count of items removed + + """ + return self._cache.evict(tag) + + + def clear(self, **kwargs): + "Remove *all* values from the cache at once." + # pylint: disable=unused-argument + return self._cache.clear() + + + def close(self, **kwargs): + "Close the cache connection." + # pylint: disable=unused-argument + self._cache.close() + + + def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): + """Return seconds to expiration. + + :param float timeout: seconds until the item expires + (default 300 seconds) + + """ + if timeout == DEFAULT_TIMEOUT: + timeout = self.default_timeout + elif timeout == 0: + # ticket 21147 - avoid time.time() related precision issues + timeout = -1 + return None if timeout is None else timeout diff --git a/ext/diskcache/fanout.py b/ext/diskcache/fanout.py new file mode 100644 index 0000000000..444f0dd305 --- /dev/null +++ b/ext/diskcache/fanout.py @@ -0,0 +1,571 @@ +"Fanout cache automatically shards keys and values." + +import itertools as it +import os.path as op +import sqlite3 +import time + +from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout +from .memo import memoize +from .persistent import Deque, Index + + +class FanoutCache(object): + "Cache that shards keys and values." + def __init__(self, directory, shards=8, timeout=0.010, disk=Disk, + **settings): + """Initialize cache instance. + + :param str directory: cache directory + :param int shards: number of shards to distribute writes + :param float timeout: SQLite connection timeout + :param disk: `Disk` instance for serialization + :param settings: any of `DEFAULT_SETTINGS` + + """ + self._directory = directory + self._count = shards + default_size_limit = DEFAULT_SETTINGS['size_limit'] + size_limit = settings.pop('size_limit', default_size_limit) / shards + self._shards = tuple( + Cache( + op.join(directory, '%03d' % num), + timeout=timeout, + disk=disk, + size_limit=size_limit, + **settings + ) + for num in range(shards) + ) + self._hash = self._shards[0].disk.hash + self._deques = {} + self._indexes = {} + + + @property + def directory(self): + """Cache directory.""" + return self._directory + + + def __getattr__(self, name): + return getattr(self._shards[0], name) + + + def set(self, key, value, expire=None, read=False, tag=None, retry=False): + """Set `key` and `value` item in cache. 
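+
+        The key is hashed to select a shard; on database `Timeout` the
+        write is retried when `retry` is True, otherwise False is returned.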
+ + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + :param key: key for item + :param value: value for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as raw bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout expires (default False) + :return: True if item was set + + """ + index = self._hash(key) % self._count + set_func = self._shards[index].set + + while True: + try: + return set_func(key, value, expire, read, tag) + except Timeout: + if retry: + continue + else: + return False + + + def __setitem__(self, key, value): + """Set `key` and `value` item in cache. + + :param key: key for item + :param value: value for item + + """ + self.set(key, value, retry=True) + + + def add(self, key, value, expire=None, read=False, tag=None, retry=False): + """Add `key` and `value` item to cache. + + Similar to `set`, but only add to cache if key not present. + + This operation is atomic. Only one concurrent add operation for given + key from separate threads or processes will succeed. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + :param key: key for item + :param value: value for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout expires (default False) + :return: True if item was added + + """ + index = self._hash(key) % self._count + add_func = self._shards[index].add + + while True: + try: + return add_func(key, value, expire, read, tag) + except Timeout: + if retry: + continue + else: + return False + + + def incr(self, key, delta=1, default=0, retry=False): + """Increment value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent increment operations will be + counted individually. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + :param key: key for item + :param int delta: amount to increment (default 1) + :param int default: value if key is missing (default 0) + :param bool retry: retry if database timeout expires (default False) + :return: new value for item on success else None + :raises KeyError: if key is not found and default is None + + """ + index = self._hash(key) % self._count + incr_func = self._shards[index].incr + + while True: + try: + return incr_func(key, delta, default) + except Timeout: + if retry: + continue + else: + return None + + + def decr(self, key, delta=1, default=0, retry=False): + """Decrement value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent decrement operations will be + counted individually. + + Unlike Memcached, negative values are supported. Value may be + decremented below zero. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. 
+
+        :param key: key for item
+        :param int delta: amount to decrement (default 1)
+        :param int default: value if key is missing (default 0)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: new value for item on success else None
+        :raises KeyError: if key is not found and default is None
+
+        """
+        return self.incr(key, -delta, default, retry)
+
+
+    def get(self, key, default=None, read=False, expire_time=False, tag=False,
+            retry=False):
+        """Retrieve value from cache. If `key` is missing, return `default`.
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param bool read: if True, return file handle to value
+            (default False)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: value for item if key is found else default
+
+        """
+        index = self._hash(key) % self._count
+        get_func = self._shards[index].get
+
+        while True:
+            try:
+                return get_func(
+                    key, default=default, read=read, expire_time=expire_time,
+                    tag=tag,
+                )
+            except (Timeout, sqlite3.OperationalError):
+                if retry:
+                    continue
+                else:
+                    return default
+
+
+    def __getitem__(self, key):
+        """Return corresponding value for `key` from cache.
+
+        :param key: key for item
+        :return: value for item
+        :raises KeyError: if key is not found
+
+        """
+        value = self.get(key, default=ENOVAL, retry=True)
+
+        if value is ENOVAL:
+            raise KeyError(key)
+
+        return value
+
+
+    def read(self, key):
+        """Return file handle corresponding to `key` from cache.
+
+        :param key: key for item
+        :return: file open for reading in binary mode
+        :raises KeyError: if key is not found
+
+        """
+        handle = self.get(key, default=ENOVAL, read=True, retry=True)
+        if handle is ENOVAL:
+            raise KeyError(key)
+        return handle
+
+
+    def __contains__(self, key):
+        """Return `True` if `key` matching item is found in cache.
+
+        :param key: key for item
+        :return: True if key is found
+
+        """
+        index = self._hash(key) % self._count
+        return key in self._shards[index]
+
+
+    def pop(self, key, default=None, expire_time=False, tag=False,
+            retry=False):
+        """Remove corresponding item for `key` from cache and return value.
+
+        If `key` is missing, return `default`.
+
+        Operation is atomic. Concurrent operations will be serialized.
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: value for item if key is found else default
+
+        """
+        index = self._hash(key) % self._count
+        pop_func = self._shards[index].pop
+
+        while True:
+            try:
+                return pop_func(
+                    key, default=default, expire_time=expire_time, tag=tag,
+                )
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return default
+
+
+    def delete(self, key, retry=False):
+        """Delete corresponding item for `key` from cache.
+
+        Missing keys are ignored.
+ + :param key: key for item + :param bool retry: retry if database timeout expires (default False) + :return: True if item was deleted + + """ + index = self._hash(key) % self._count + del_func = self._shards[index].__delitem__ + + while True: + try: + return del_func(key) + except Timeout: + if retry: + continue + else: + return False + except KeyError: + return False + + + def __delitem__(self, key): + """Delete corresponding item for `key` from cache. + + :param key: key for item + :raises KeyError: if key is not found + + """ + deleted = self.delete(key, retry=True) + + if not deleted: + raise KeyError(key) + + + memoize = memoize + + + def check(self, fix=False): + """Check database and file system consistency. + + Intended for use in testing and post-mortem error analysis. + + While checking the cache table for consistency, a writer lock is held + on the database. The lock blocks other cache clients from writing to + the database. For caches with many file references, the lock may be + held for a long time. For example, local benchmarking shows that a + cache with 1,000 file references takes ~60ms to check. + + :param bool fix: correct inconsistencies + :return: list of warnings + :raises Timeout: if database timeout expires + + """ + return sum((shard.check(fix=fix) for shard in self._shards), []) + + + def expire(self): + """Remove expired items from cache. + + :return: count of items removed + + """ + return self._remove('expire', args=(time.time(),)) + + + def create_tag_index(self): + """Create tag index on cache database. + + It is better to initialize cache with `tag_index=True` than use this. + + :raises Timeout: if database timeout expires + + """ + for shard in self._shards: + shard.create_tag_index() + + + def drop_tag_index(self): + """Drop tag index on cache database. + + :raises Timeout: if database timeout expires + + """ + for shard in self._shards: + shard.drop_tag_index() + + + def evict(self, tag): + """Remove items with matching `tag` from cache. + + :param str tag: tag identifying items + :return: count of items removed + + """ + return self._remove('evict', args=(tag,)) + + + def clear(self): + """Remove all items from cache. + + :return: count of items removed + + """ + return self._remove('clear') + + + def _remove(self, name, args=()): + total = 0 + for shard in self._shards: + method = getattr(shard, name) + while True: + try: + count = method(*args) + total += count + except Timeout as timeout: + total += timeout.args[0] + else: + break + return total + + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + results = [shard.stats(enable, reset) for shard in self._shards] + return (sum(result[0] for result in results), + sum(result[1] for result in results)) + + + def volume(self): + """Return estimated total size of cache on disk. + + :return: size in bytes + + """ + return sum(shard.volume() for shard in self._shards) + + + def close(self): + "Close database connection." 
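+        # Editor's note: closes every shard's SQLite connection and clears
+        # the cached Deque/Index views so they are lazily re-created on
+        # next access.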
+ for shard in self._shards: + shard.close() + self._deques.clear() + self._indexes.clear() + + + def __enter__(self): + return self + + + def __exit__(self, *exception): + self.close() + + + def __getstate__(self): + return (self._directory, self._count, self.timeout, type(self.disk)) + + + def __setstate__(self, state): + self.__init__(*state) + + + def __iter__(self): + "Iterate keys in cache including expired items." + iterators = [iter(shard) for shard in self._shards] + return it.chain.from_iterable(iterators) + + + def __reversed__(self): + "Reverse iterate keys in cache including expired items." + iterators = [reversed(shard) for shard in self._shards] + return it.chain.from_iterable(reversed(iterators)) + + + def __len__(self): + "Count of items in cache including expired items." + return sum(len(shard) for shard in self._shards) + + + def reset(self, key, value=ENOVAL): + """Reset `key` and `value` item from Settings table. + + If `value` is not given, it is reloaded from the Settings + table. Otherwise, the Settings table is updated. + + Settings attributes on cache objects are lazy-loaded and + read-only. Use `reset` to update the value. + + Settings with the ``sqlite_`` prefix correspond to SQLite + pragmas. Updating the value will execute the corresponding PRAGMA + statement. + + :param str key: Settings key for item + :param value: value for item (optional) + :return: updated value for item + :raises Timeout: if database timeout expires + + """ + for shard in self._shards: + while True: + try: + result = shard.reset(key, value) + except Timeout: + pass + else: + break + return result + + + def deque(self, name): + """Return Deque with given `name` in subdirectory. + + >>> cache = FanoutCache('/tmp/diskcache/fanoutcache') + >>> deque = cache.deque('test') + >>> deque.clear() + >>> deque.extend('abc') + >>> deque.popleft() + 'a' + >>> deque.pop() + 'c' + >>> len(deque) + 1 + + :param str name: subdirectory name for Deque + :return: Deque with given name + + """ + _deques = self._deques + + try: + return _deques[name] + except KeyError: + parts = name.split('/') + directory = op.join(self._directory, 'deque', *parts) + temp = Deque(directory=directory) + _deques[name] = temp + return temp + + + def index(self, name): + """Return Index with given `name` in subdirectory. + + >>> cache = FanoutCache('/tmp/diskcache/fanoutcache') + >>> index = cache.index('test') + >>> index.clear() + >>> index['abc'] = 123 + >>> index['def'] = 456 + >>> index['ghi'] = 789 + >>> index.popitem() + ('ghi', 789) + >>> del index['abc'] + >>> len(index) + 1 + >>> index['def'] + 456 + + :param str name: subdirectory name for Index + :return: Index with given name + + """ + _indexes = self._indexes + + try: + return _indexes[name] + except KeyError: + parts = name.split('/') + directory = op.join(self._directory, 'index', *parts) + temp = Index(directory) + _indexes[name] = temp + return temp diff --git a/ext/diskcache/memo.py b/ext/diskcache/memo.py new file mode 100644 index 0000000000..3a2243a8d7 --- /dev/null +++ b/ext/diskcache/memo.py @@ -0,0 +1,105 @@ +"""Memoization utilities. + +""" + +from functools import wraps + +from .core import ENOVAL + +def memoize(cache, name=None, typed=False, expire=None, tag=None): + """Memoizing cache decorator. + + Decorator to wrap callable with memoizing function using cache. Repeated + calls with the same arguments will lookup result in cache and avoid + function evaluation. 
+
+    If name is set to None (default), the callable name will be determined
+    automatically.
+
+    If typed is set to True, function arguments of different types will be
+    cached separately. For example, f(3) and f(3.0) will be treated as distinct
+    calls with distinct results.
+
+    The original underlying function is accessible through the __wrapped__
+    attribute. This is useful for introspection, for bypassing the cache, or
+    for rewrapping the function with a different cache.
+
+    >>> from diskcache import FanoutCache
+    >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
+    >>> @cache.memoize(typed=True, expire=1, tag='fib')
+    ... def fibonacci(number):
+    ...     if number == 0:
+    ...         return 0
+    ...     elif number == 1:
+    ...         return 1
+    ...     else:
+    ...         return fibonacci(number - 1) + fibonacci(number - 2)
+    >>> print(sum(fibonacci(number=value) for value in range(100)))
+    573147844013817084100
+
+    Remember to call memoize when decorating a callable. If you forget, then a
+    TypeError will occur. Note the lack of parentheses after memoize below:
+
+    >>> @cache.memoize
+    ... def test():
+    ...     pass
+    Traceback (most recent call last):
+        ...
+    TypeError: name cannot be callable
+
+    :param cache: cache to store callable arguments and return values
+    :param str name: name given for callable (default None, automatic)
+    :param bool typed: cache different types separately (default False)
+    :param float expire: seconds until arguments expire
+        (default None, no expiry)
+    :param str tag: text to associate with arguments (default None)
+    :return: callable decorator
+
+    """
+    if callable(name):
+        raise TypeError('name cannot be callable')
+
+    def decorator(function):
+        "Decorator created by memoize call for callable."
+        if name is None:
+            try:
+                reference = function.__qualname__
+            except AttributeError:
+                reference = function.__name__
+
+            reference = function.__module__ + reference
+        else:
+            reference = name
+
+        reference = (reference,)
+
+        @wraps(function)
+        def wrapper(*args, **kwargs):
+            "Wrapper for callable to cache arguments and return values."
+
+            key = reference + args
+
+            if kwargs:
+                key += (ENOVAL,)
+                sorted_items = sorted(kwargs.items())
+
+                for item in sorted_items:
+                    key += item
+
+            if typed:
+                key += tuple(type(arg) for arg in args)
+
+                if kwargs:
+                    key += tuple(type(value) for _, value in sorted_items)
+
+            result = cache.get(key, default=ENOVAL, retry=True)
+
+            if result is ENOVAL:
+                result = function(*args, **kwargs)
+                cache.set(key, result, expire=expire, tag=tag, retry=True)
+
+            return result
+
+        return wrapper
+
+    return decorator
diff --git a/ext/diskcache/persistent.py b/ext/diskcache/persistent.py
new file mode 100644
index 0000000000..ae7889ef57
--- /dev/null
+++ b/ext/diskcache/persistent.py
@@ -0,0 +1,1313 @@
+"""Persistent Data Types
+
+"""
+
+import operator as op
+import sys
+
+from collections import MutableMapping, OrderedDict, Sequence
+from collections import KeysView, ValuesView, ItemsView
+from itertools import islice
+from shutil import rmtree
+from tempfile import mkdtemp
+
+from .core import BytesType, Cache, ENOVAL, TextType, Timeout
+
+if sys.hexversion < 0x03000000:
+    from itertools import izip as zip  # pylint: disable=redefined-builtin,ungrouped-imports,wrong-import-order
+    range = xrange  # pylint: disable=redefined-builtin,invalid-name
+
+
+def _make_compare(seq_op, doc):
+    "Make compare method with Sequence semantics."
+    def compare(self, that):
+        "Compare method for deque and sequence."
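+        # Editor's note: lexicographic comparison; the first unequal pair
+        # decides the result via seq_op, equal prefixes fall back to
+        # comparing lengths, and ==/!= exit early when lengths differ.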
+ if not isinstance(that, Sequence): + return NotImplemented + + len_self = len(self) + len_that = len(that) + + if len_self != len_that: + if seq_op is op.eq: + return False + if seq_op is op.ne: + return True + + for alpha, beta in zip(self, that): + if alpha != beta: + return seq_op(alpha, beta) + + return seq_op(len_self, len_that) + + compare.__name__ = '__{0}__'.format(seq_op.__name__) + doc_str = 'Return True if and only if deque is {0} `that`.' + compare.__doc__ = doc_str.format(doc) + + return compare + + +class Deque(Sequence): + """Persistent sequence with double-ended queue semantics. + + Double-ended queue is an ordered collection with optimized access at its + endpoints. + + Items are serialized to disk. Deque may be initialized from directory path + where items are stored. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque + Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += range(5) + >>> list(deque) + [0, 1, 2, 3, 4] + >>> for value in range(5): + ... deque.appendleft(-value) + >>> len(deque) + 10 + >>> list(deque) + [-4, -3, -2, -1, 0, 0, 1, 2, 3, 4] + >>> deque.pop() + 4 + >>> deque.popleft() + -4 + >>> deque.reverse() + >>> list(deque) + [3, 2, 1, 0, 0, -1, -2, -3] + + """ + def __init__(self, iterable=(), directory=None): + """Initialize deque instance. + + If directory is None then temporary directory created. The directory + will *not* be automatically removed. + + :param iterable: iterable of items to append to deque + :param directory: deque directory (default None) + + """ + if directory is None: + directory = mkdtemp() + self._cache = Cache(directory, eviction_policy='none') + self.extend(iterable) + + + @classmethod + def fromcache(cls, cache, iterable=()): + """Initialize deque using `cache`. + + >>> cache = Cache('/tmp/diskcache/index') + >>> _ = cache.clear() + >>> deque = Deque.fromcache(cache, [5, 6, 7, 8]) + >>> len(deque) + 4 + >>> 7 in deque + True + >>> deque.popleft() + 5 + + :param Cache cache: cache to use + :param iterable: iterable of items + :return: initialized Deque + + """ + # pylint: disable=no-member,protected-access + self = cls.__new__(cls) + self._cache = cache + self.extend(iterable) + return self + + + @property + def directory(self): + "Directory path where deque is stored." + return self._cache.directory + + + def _key(self, index): + len_self = len(self) + + if index < 0: + index += len_self + if index < 0: + raise IndexError('deque index out of range') + elif index >= len_self: + raise IndexError('deque index out of range') + + diff = len_self - index - 1 + _cache_iterkeys = self._cache.iterkeys + + try: + if index <= diff: + iter_keys = _cache_iterkeys() + key = next(islice(iter_keys, index, index + 1)) + else: + iter_keys = _cache_iterkeys(reverse=True) + key = next(islice(iter_keys, diff, diff + 1)) + except StopIteration: + raise IndexError('deque index out of range') + + return key + + + def __getitem__(self, index): + """deque.__getitem__(index) <==> deque[index] + + Return corresponding item for `index` in deque. 
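+
+        (Editor's note: the index is translated to a key by iterating from
+        the nearer end, so item access is O(n) in the worst case; access at
+        the endpoints stays cheap.)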
+ + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.extend('abcde') + >>> deque[0] + 'a' + >>> deque[-1] + 'e' + >>> deque[2] + 'c' + + :param int index: index of item + :return: corresponding item + :raises IndexError: if index out of range + + """ + _key = self._key + _cache = self._cache + + while True: + try: + key = _key(index) + return _cache[key] + except (KeyError, Timeout): + continue + + + def __setitem__(self, index, value): + """deque.__setitem__(index, value) <==> deque[index] = value + + Store `value` in deque at `index`. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.extend([None] * 3) + >>> deque[0] = 'a' + >>> deque[1] = 'b' + >>> deque[-1] = 'c' + >>> ''.join(deque) + 'abc' + + :param int index: index of value + :param value: value to store + :raises IndexError: if index out of range + + """ + _key = self._key + _cache = self._cache + + while True: + try: + key = _key(index) + _cache[key] = value + return + except Timeout: + continue + + + def __delitem__(self, index): + """deque.__delitem__(index) <==> del deque[index] + + Delete item in deque at `index`. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.extend([None] * 3) + >>> del deque[0] + >>> del deque[1] + >>> del deque[-1] + >>> len(deque) + 0 + + :param int index: index of item + :raises IndexError: if index out of range + + """ + _key = self._key + _cache = self._cache + + while True: + try: + key = _key(index) + del _cache[key] + return + except (KeyError, Timeout): + continue + + + def __repr__(self): + """deque.__repr__() <==> repr(deque) + + Return string with printable representation of deque. + + """ + name = type(self).__name__ + return '{0}(directory={1!r})'.format(name, self.directory) + + + __eq__ = _make_compare(op.eq, 'equal to') + __ne__ = _make_compare(op.ne, 'not equal to') + __lt__ = _make_compare(op.lt, 'less than') + __gt__ = _make_compare(op.gt, 'greater than') + __le__ = _make_compare(op.le, 'less than or equal to') + __ge__ = _make_compare(op.ge, 'greater than or equal to') + + + def __iadd__(self, iterable): + """deque.__iadd__(iterable) <==> deque += iterable + + Extend back side of deque with items from iterable. + + """ + self.extend(iterable) + return self + + + def __iter__(self): + """deque.__iter__() <==> iter(deque) + + Return iterator of deque from front to back. + + """ + _cache = self._cache + + for key in _cache.iterkeys(): + try: + yield _cache[key] + except (KeyError, Timeout): + pass + + + def __len__(self): + """deque.__len__() <==> len(deque) + + Return length of deque. + + """ + return len(self._cache) + + + def __reversed__(self): + """deque.__reversed__() <==> reversed(deque) + + Return iterator of deque from back to front. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.extend('abcd') + >>> iterator = reversed(deque) + >>> next(iterator) + 'd' + >>> list(iterator) + ['c', 'b', 'a'] + + """ + _cache = self._cache + + for key in _cache.iterkeys(reverse=True): + try: + yield _cache[key] + except (KeyError, Timeout): + pass + + + def __getstate__(self): + return self.directory + + + def __setstate__(self, state): + self.__init__(directory=state) + + + def append(self, value): + """Add `value` to back of deque. 
+ + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.append('a') + >>> deque.append('b') + >>> deque.append('c') + >>> list(deque) + ['a', 'b', 'c'] + + :param value: value to add to back of deque + + """ + _cache_push = self._cache.push + + while True: + try: + _cache_push(value) + return + except Timeout: + continue + + + def appendleft(self, value): + """Add `value` to front of deque. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.appendleft('a') + >>> deque.appendleft('b') + >>> deque.appendleft('c') + >>> list(deque) + ['c', 'b', 'a'] + + :param value: value to add to front of deque + + """ + _cache_push = self._cache.push + + while True: + try: + _cache_push(value, side='front') + return + except Timeout: + continue + + + def clear(self): + """Remove all elements from deque. + + """ + _cache_clear = self._cache.clear + + while True: + try: + _cache_clear() + return + except Timeout: + continue + + + def count(self, value): + """Return number of occurrences of `value` in deque. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += [num for num in range(1, 5) for _ in range(num)] + >>> deque.count(0) + 0 + >>> deque.count(1) + 1 + >>> deque.count(4) + 4 + + :param value: value to count in deque + + """ + return sum(1 for item in self if item == value) + + + def extend(self, iterable): + """Extend back side of deque with values from `iterable`. + + :param iterable: iterable of values + + """ + for value in iterable: + self.append(value) + + + def extendleft(self, iterable): + """Extend front side of deque with value from `iterable`. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque.extendleft('abc') + >>> list(deque) + ['c', 'b', 'a'] + + :param iterable: iterable of values + + """ + for value in iterable: + self.appendleft(value) + + + def pop(self): + """Remove and return value at back of deque. + + If deque is empty then raise IndexError. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += 'ab' + >>> deque.pop() + 'b' + >>> deque.pop() + 'a' + >>> deque.pop() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + + :raises IndexError: if deque is empty + + """ + _cache_pull = self._cache.pull + + while True: + try: + default = None, ENOVAL + _, value = _cache_pull(default=default, side='back') + except Timeout: + continue + else: + if value is ENOVAL: + raise IndexError('pop from an empty deque') + return value + + + def popleft(self): + """Remove and return value at front of deque. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += 'ab' + >>> deque.popleft() + 'a' + >>> deque.popleft() + 'b' + >>> deque.popleft() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + + """ + _cache_pull = self._cache.pull + + while True: + try: + default = None, ENOVAL + _, value = _cache_pull(default=default) + except Timeout: + continue + else: + if value is ENOVAL: + raise IndexError('pop from an empty deque') + return value + + + def remove(self, value): + """Remove first occurrence of `value` in deque. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += 'aab' + >>> deque.remove('a') + >>> list(deque) + ['a', 'b'] + >>> deque.remove('b') + >>> list(deque) + ['a'] + >>> deque.remove('c') + Traceback (most recent call last): + ... 
+ ValueError: deque.remove(value): value not in deque + + :param value: value to remove + :raises ValueError: if value not in deque + + """ + _cache = self._cache + + for key in _cache.iterkeys(): + try: + while True: + try: + item = _cache[key] + except Timeout: + continue + else: + break + except KeyError: + continue + else: + if value == item: + try: + while True: + try: + del _cache[key] + except Timeout: + continue + else: + return + except KeyError: + continue + + raise ValueError('deque.remove(value): value not in deque') + + + def reverse(self): + """Reverse deque in place. + + """ + # pylint: disable=protected-access + directory = mkdtemp() + temp = None + + try: + temp = Deque(iterable=reversed(self), directory=directory) + self.clear() + self.extend(temp) + finally: + temp._cache.close() + del temp + rmtree(directory) + + + def rotate(self, steps=1): + """Rotate deque right by `steps`. + + If steps is negative then rotate left. + + >>> deque = Deque(directory='/tmp/diskcache/deque') + >>> deque.clear() + >>> deque += range(5) + >>> deque.rotate(2) + >>> list(deque) + [3, 4, 0, 1, 2] + >>> deque.rotate(-1) + >>> list(deque) + [4, 0, 1, 2, 3] + + :param int steps: number of steps to rotate (default 1) + + """ + if not isinstance(steps, int): + type_name = type(steps).__name__ + raise TypeError('integer argument expected, got %s' % type_name) + + len_self = len(self) + + if not len_self: + return + + if steps >= 0: + steps %= len_self + + for _ in range(steps): + try: + value = self.pop() + except IndexError: + return + else: + self.appendleft(value) + else: + steps *= -1 + steps %= len_self + + for _ in range(steps): + try: + value = self.popleft() + except IndexError: + return + else: + self.append(value) + + + def __del__(self): + self._cache.close() + + + __hash__ = None + + +class Index(MutableMapping): + """Persistent mutable mapping with insertion order iteration. + + Items are serialized to disk. Index may be initialized from directory path + where items are stored. + + Hashing protocol is not used. Keys are looked up by their serialized + format. See ``diskcache.Disk`` for details. + + >>> index = Index('/tmp/diskcache/index') + >>> index + Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index['a'] + 1 + >>> list(index) + ['a', 'b', 'c'] + >>> len(index) + 3 + >>> del index['b'] + >>> index.popitem() + ('c', 3) + + """ + def __init__(self, *args, **kwargs): + """Initialize index in directory and update items. + + Optional first argument may be string specifying directory where items + are stored. When None or not given, temporary directory is created. + + >>> index = Index({'a': 1, 'b': 2, 'c': 3}) + >>> len(index) + 3 + >>> directory = index.directory + >>> inventory = Index(directory, d=4) + >>> inventory['b'] + 2 + >>> len(inventory) + 4 + + """ + if args and isinstance(args[0], (BytesType, TextType)): + directory = args[0] + args = args[1:] + else: + if args and args[0] is None: + args = args[1:] + directory = mkdtemp(prefix='diskcache-') + self._cache = Cache(directory, eviction_policy='none') + self.update(*args, **kwargs) + + + @classmethod + def fromcache(cls, cache, *args, **kwargs): + """Initialize index using `cache` and update items. 
+ + >>> cache = Cache('/tmp/diskcache/index') + >>> _ = cache.clear() + >>> index = Index.fromcache(cache, {'a': 1, 'b': 2, 'c': 3}) + >>> len(index) + 3 + >>> 'b' in index + True + >>> index['c'] + 3 + + :param Cache cache: cache to use + :param args: mapping or sequence of items + :param kwargs: mapping of items + :return: initialized Index + + """ + # pylint: disable=no-member,protected-access + self = cls.__new__(cls) + self._cache = cache + self.update(*args, **kwargs) + return self + + + @property + def directory(self): + "Directory path where items are stored." + return self._cache.directory + + + def __getitem__(self, key): + """index.__getitem__(key) <==> index[key] + + Return corresponding value for `key` in index. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2}) + >>> index['a'] + 1 + >>> index['b'] + 2 + >>> index['c'] + Traceback (most recent call last): + ... + KeyError: 'c' + + :param key: key for item + :raises KeyError: if key is not found + + """ + _cache = self._cache + + while True: + try: + return _cache[key] + except Timeout: + continue + + + def __setitem__(self, key, value): + """index.__setitem__(key, value) <==> index[key] = value + + Set `key` and `value` item in index. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index['a'] = 1 + >>> index[0] = None + >>> len(index) + 2 + + :param key: key for item + :param value: value for item + + """ + _cache = self._cache + + while True: + try: + _cache[key] = value + except Timeout: + continue + else: + return + + + def __delitem__(self, key): + """index.__delitem__(key) <==> del index[key] + + Delete corresponding item for `key` from index. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2}) + >>> del index['a'] + >>> del index['b'] + >>> len(index) + 0 + >>> del index['c'] + Traceback (most recent call last): + ... + KeyError: 'c' + + :param key: key for item + :raises KeyError: if key is not found + + """ + _cache = self._cache + + while True: + try: + del _cache[key] + except Timeout: + continue + else: + return + + + def pop(self, key, default=ENOVAL): + """Remove corresponding item for `key` from index and return value. + + If `key` is missing then return `default`. If `default` is `ENOVAL` + then raise KeyError. + + >>> index = Index('/tmp/diskcache/index', {'a': 1, 'b': 2}) + >>> index.pop('a') + 1 + >>> index.pop('b') + 2 + >>> index.pop('c', default=3) + 3 + >>> index.pop('d') + Traceback (most recent call last): + ... + KeyError: 'd' + + :param key: key for item + :param default: return value if key is missing (default ENOVAL) + :return: value for item if key is found else default + :raises KeyError: if key is not found and default is ENOVAL + + """ + + _cache = self._cache + + while True: + try: + value = _cache.pop(key, default=default) + except Timeout: + continue + else: + if value is ENOVAL: + raise KeyError(key) + return value + + + def popitem(self, last=True): + """Remove and return item pair. + + Item pairs are returned in last-in-first-out (LIFO) order if last is + True else first-in-first-out (FIFO) order. LIFO order imitates a stack + and FIFO order imitates a queue. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index.popitem() + ('c', 3) + >>> index.popitem(last=False) + ('a', 1) + >>> index.popitem() + ('b', 2) + >>> index.popitem() + Traceback (most recent call last): + ... 
+ KeyError + + :param bool last: pop last item pair (default True) + :return: key and value item pair + :raises KeyError: if index is empty + + """ + # pylint: disable=arguments-differ + _cache = self._cache + + while True: + try: + if last: + key = next(reversed(_cache)) + else: + key = next(iter(_cache)) + except StopIteration: + raise KeyError + + try: + value = _cache.pop(key) + except (KeyError, Timeout): + continue + else: + return key, value + + + def push(self, value, prefix=None, side='back'): + """Push `value` onto `side` of queue in index identified by `prefix`. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + Defaults to pushing value on back of queue. Set side to 'front' to push + value on front of queue. Side must be one of 'back' or 'front'. + + See also `Index.pull`. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> print(index.push('apples')) + 500000000000000 + >>> print(index.push('beans')) + 500000000000001 + >>> print(index.push('cherries', side='front')) + 499999999999999 + >>> index[500000000000001] + 'beans' + >>> index.push('dates', prefix='fruit') + 'fruit-500000000000000' + + :param value: value for item + :param str prefix: key prefix (default None, key is integer) + :param str side: either 'back' or 'front' (default 'back') + :return: key for item in cache + + """ + _cache_push = self._cache.push + + while True: + try: + return _cache_push(value, prefix, side) + except Timeout: + continue + + + def pull(self, prefix=None, default=(None, None), side='front'): + """Pull key and value item pair from `side` of queue in index. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + If queue is empty, return default. + + Defaults to pulling key and value item pairs from front of queue. Set + side to 'back' to pull from back of queue. Side must be one of 'front' + or 'back'. + + See also `Index.push`. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> for letter in 'abc': + ... print(index.push(letter)) + 500000000000000 + 500000000000001 + 500000000000002 + >>> key, value = index.pull() + >>> print(key) + 500000000000000 + >>> value + 'a' + >>> _, value = index.pull(side='back') + >>> value + 'c' + >>> index.pull(prefix='fruit') + (None, None) + + :param str prefix: key prefix (default None, key is integer) + :param default: value to return if key is missing + (default (None, None)) + :param str side: either 'front' or 'back' (default 'front') + :return: key and value item pair or default if queue is empty + + """ + _cache_pull = self._cache.pull + + while True: + try: + return _cache_pull(prefix, default, side) + except Timeout: + continue + + + def clear(self): + """Remove all items from index. + + """ + _cache_clear = self._cache.clear + + while True: + try: + _cache_clear() + return + except Timeout: + continue + + + def __iter__(self): + """index.__iter__() <==> iter(index) + + Return iterator of index keys in insertion order. + + """ + return iter(self._cache) + + + def __reversed__(self): + """index.__reversed__() <==> reversed(index) + + Return iterator of index keys in reversed insertion order. 
+ + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> iterator = reversed(index) + >>> next(iterator) + 'c' + >>> list(iterator) + ['b', 'a'] + + """ + return reversed(self._cache) + + + def __len__(self): + """index.__len__() <==> len(index) + + Return length of index. + + """ + return len(self._cache) + + + if sys.hexversion < 0x03000000: + def keys(self): + """List of index keys. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index.keys() + ['a', 'b', 'c'] + + :return: list of keys + + """ + return list(self._cache) + + + def values(self): + """List of index values. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index.values() + [1, 2, 3] + + :return: list of values + + """ + return list(self.itervalues()) + + + def items(self): + """List of index items. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index.items() + [('a', 1), ('b', 2), ('c', 3)] + + :return: list of items + + """ + return list(self.iteritems()) + + + def iterkeys(self): + """Iterator of index keys. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> list(index.iterkeys()) + ['a', 'b', 'c'] + + :return: iterator of keys + + """ + return iter(self._cache) + + + def itervalues(self): + """Iterator of index values. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> list(index.itervalues()) + [1, 2, 3] + + :return: iterator of values + + """ + _cache = self._cache + + for key in _cache: + while True: + try: + yield _cache[key] + except KeyError: + break + except Timeout: + continue + else: + break + + + def iteritems(self): + """Iterator of index items. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> list(index.iteritems()) + [('a', 1), ('b', 2), ('c', 3)] + + :return: iterator of items + + """ + _cache = self._cache + + for key in _cache: + while True: + try: + yield key, _cache[key] + except KeyError: + break + except Timeout: + continue + else: + break + + + def viewkeys(self): + """Set-like object providing a view of index keys. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> keys_view = index.viewkeys() + >>> 'b' in keys_view + True + + :return: keys view + + """ + return KeysView(self) + + + def viewvalues(self): + """Set-like object providing a view of index values. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> values_view = index.viewvalues() + >>> 2 in values_view + True + + :return: values view + + """ + return ValuesView(self) + + + def viewitems(self): + """Set-like object providing a view of index items. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> items_view = index.viewitems() + >>> ('b', 2) in items_view + True + + :return: items view + + """ + return ItemsView(self) + + + else: + def keys(self): + """Set-like object providing a view of index keys. 
+ + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> keys_view = index.keys() + >>> 'b' in keys_view + True + + :return: keys view + + """ + return KeysView(self) + + + def values(self): + """Set-like object providing a view of index values. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> values_view = index.values() + >>> 2 in values_view + True + + :return: values view + + """ + return ValuesView(self) + + + def items(self): + """Set-like object providing a view of index items. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> items_view = index.items() + >>> ('b', 2) in items_view + True + + :return: items view + + """ + return ItemsView(self) + + + __hash__ = None + + + def __getstate__(self): + return self.directory + + + def __setstate__(self, state): + self.__init__(state) + + + def __eq__(self, other): + """index.__eq__(other) <==> index == other + + Compare equality for index and `other`. + + Comparison to another index or ordered dictionary is + order-sensitive. Comparison to all other mappings is order-insensitive. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> pairs = [('a', 1), ('b', 2), ('c', 3)] + >>> index.update(pairs) + >>> from collections import OrderedDict + >>> od = OrderedDict(pairs) + >>> index == od + True + >>> index == {'c': 3, 'b': 2, 'a': 1} + True + + :param other: other mapping in equality comparison + + """ + if len(self) != len(other): + return False + + if isinstance(other, (Index, OrderedDict)): + alpha = ((key, self[key]) for key in self) + beta = ((key, other[key]) for key in other) + pairs = zip(alpha, beta) + return not any(a != x or b != y for (a, b), (x, y) in pairs) + else: + return all(self[key] == other.get(key, ENOVAL) for key in self) + + + def __ne__(self, other): + """index.__ne__(other) <==> index != other + + Compare inequality for index and `other`. + + Comparison to another index or ordered dictionary is + order-sensitive. Comparison to all other mappings is order-insensitive. + + >>> index = Index('/tmp/diskcache/index') + >>> index.clear() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> from collections import OrderedDict + >>> od = OrderedDict([('c', 3), ('b', 2), ('a', 1)]) + >>> index != od + True + >>> index != {'a': 1, 'b': 2} + True + + :param other: other mapping in inequality comparison + + """ + return not self == other + + + def __repr__(self): + """index.__repr__() <==> repr(index) + + Return string with printable representation of index. + + """ + name = type(self).__name__ + return '{0}({1!r})'.format(name, self.directory) + + + def __del__(self): + self._cache.close() diff --git a/ext/diskcache/stampede.py b/ext/diskcache/stampede.py new file mode 100644 index 0000000000..0c501f783a --- /dev/null +++ b/ext/diskcache/stampede.py @@ -0,0 +1,75 @@ +"Stampede barrier implementation." + +import functools as ft +import math +import random +import tempfile +import time + +from .core import Cache, ENOVAL + + +class StampedeBarrier(object): + """Stampede barrier mitigates cache stampedes. + + Cache stampedes are also known as dog-piling, cache miss storm, cache + choking, or the thundering herd problem. + + Based on research by Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), + Optimal Probabilistic Cache Stampede Prevention, + VLDB, pp. 
886-897, ISSN 2150-8097
+
+    Example:
+
+    >>> stampede_barrier = StampedeBarrier('/tmp/user_data', expire=3)
+    >>> @stampede_barrier
+    ... def load_user_info(user_id):
+    ...     return database.lookup_user_info_by_id(user_id)
+
+    """
+    # pylint: disable=too-few-public-methods
+    def __init__(self, cache=None, expire=None):
+        if isinstance(cache, Cache):
+            pass
+        elif cache is None:
+            cache = Cache(tempfile.mkdtemp())
+        else:
+            cache = Cache(cache)
+
+        self._cache = cache
+        self._expire = expire
+
+    def __call__(self, func):
+        cache = self._cache
+        expire = self._expire
+
+        @ft.wraps(func)
+        def wrapper(*args, **kwargs):
+            "Wrapper function to cache function result."
+            key = (args, kwargs)
+
+            try:
+                result, expire_time, delta = cache.get(
+                    key, default=ENOVAL, expire_time=True, tag=True
+                )
+
+                if result is ENOVAL:
+                    raise KeyError
+
+                now = time.time()
+                ttl = expire_time - now
+
+                if (-delta * math.log(random.random())) < ttl:
+                    return result
+
+            except KeyError:
+                pass
+
+            now = time.time()
+            result = func(*args, **kwargs)
+            delta = time.time() - now
+            cache.set(key, result, expire=expire, tag=delta)
+
+            return result
+
+        return wrapper
diff --git a/ext/funcsigs/__init__.py b/ext/funcsigs/__init__.py
new file mode 100644
index 0000000000..5f5378b42a
--- /dev/null
+++ b/ext/funcsigs/__init__.py
@@ -0,0 +1,829 @@
+# Copyright 2001-2013 Python Software Foundation; All Rights Reserved
+"""Function signature objects for callables
+
+Back port of Python 3.3's function signature tools from the inspect module,
+modified to be compatible with Python 2.6, 2.7 and 3.3+.
+"""
+from __future__ import absolute_import, division, print_function
+import itertools
+import functools
+import re
+import types
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+from funcsigs.version import __version__
+
+__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
+
+
+_WrapperDescriptor = type(type.__call__)
+_MethodWrapper = type(all.__call__)
+
+_NonUserDefinedCallables = (_WrapperDescriptor,
+                            _MethodWrapper,
+                            types.BuiltinFunctionType)
+
+
+def formatannotation(annotation, base_module=None):
+    if isinstance(annotation, type):
+        if annotation.__module__ in ('builtins', '__builtin__', base_module):
+            return annotation.__name__
+        return annotation.__module__+'.'+annotation.__name__
+    return repr(annotation)
+
+
+def _get_user_defined_method(cls, method_name, *nested):
+    try:
+        if cls is type:
+            return
+        meth = getattr(cls, method_name)
+        for name in nested:
+            meth = getattr(meth, name, meth)
+    except AttributeError:
+        return
+    else:
+        if not isinstance(meth, _NonUserDefinedCallables):
+            # Once '__signature__' will be added to 'C'-level
+            # callables, this check won't be necessary
+            return meth
+
+
+def signature(obj):
+    '''Get a signature object for the passed callable.'''
+
+    if not callable(obj):
+        raise TypeError('{0!r} is not a callable object'.format(obj))
+
+    if isinstance(obj, types.MethodType):
+        sig = signature(obj.__func__)
+        if obj.__self__ is None:
+            # Unbound method - preserve as-is.
+            return sig
+        else:
+            # Bound method. Eat self - if we can.
+ params = tuple(sig.parameters.values()) + + if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + raise ValueError('invalid method signature') + + kind = params[0].kind + if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY): + # Drop first parameter: + # '(p1, p2[, ...])' -> '(p2[, ...])' + params = params[1:] + else: + if kind is not _VAR_POSITIONAL: + # Unless we add a new parameter type we never + # get here + raise ValueError('invalid argument type') + # It's a var-positional parameter. + # Do nothing. '(*args[, ...])' -> '(*args[, ...])' + + return sig.replace(parameters=params) + + try: + sig = obj.__signature__ + except AttributeError: + pass + else: + if sig is not None: + return sig + + try: + # Was this function wrapped by a decorator? + wrapped = obj.__wrapped__ + except AttributeError: + pass + else: + return signature(wrapped) + + if isinstance(obj, types.FunctionType): + return Signature.from_function(obj) + + if isinstance(obj, functools.partial): + sig = signature(obj.func) + + new_params = OrderedDict(sig.parameters.items()) + + partial_args = obj.args or () + partial_keywords = obj.keywords or {} + try: + ba = sig.bind_partial(*partial_args, **partial_keywords) + except TypeError as ex: + msg = 'partial object {0!r} has incorrect arguments'.format(obj) + raise ValueError(msg) + + for arg_name, arg_value in ba.arguments.items(): + param = new_params[arg_name] + if arg_name in partial_keywords: + # We set a new default value, because the following code + # is correct: + # + # >>> def foo(a): print(a) + # >>> print(partial(partial(foo, a=10), a=20)()) + # 20 + # >>> print(partial(partial(foo, a=10), a=20)(a=30)) + # 30 + # + # So, with 'partial' objects, passing a keyword argument is + # like setting a new default value for the corresponding + # parameter + # + # We also mark this parameter with '_partial_kwarg' + # flag. Later, in '_bind', the 'default' value of this + # parameter will be added to 'kwargs', to simulate + # the 'functools.partial' real call. 
+ new_params[arg_name] = param.replace(default=arg_value, + _partial_kwarg=True) + + elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and + not param._partial_kwarg): + new_params.pop(arg_name) + + return sig.replace(parameters=new_params.values()) + + sig = None + if isinstance(obj, type): + # obj is a class or a metaclass + + # First, let's see if it has an overloaded __call__ defined + # in its metaclass + call = _get_user_defined_method(type(obj), '__call__') + if call is not None: + sig = signature(call) + else: + # Now we check if the 'obj' class has a '__new__' method + new = _get_user_defined_method(obj, '__new__') + if new is not None: + sig = signature(new) + else: + # Finally, we should have at least __init__ implemented + init = _get_user_defined_method(obj, '__init__') + if init is not None: + sig = signature(init) + elif not isinstance(obj, _NonUserDefinedCallables): + # An object with __call__ + # We also check that the 'obj' is not an instance of + # _WrapperDescriptor or _MethodWrapper to avoid + # infinite recursion (and even potential segfault) + call = _get_user_defined_method(type(obj), '__call__', 'im_func') + if call is not None: + sig = signature(call) + + if sig is not None: + # For classes and objects we skip the first parameter of their + # __call__, __new__, or __init__ methods + return sig.replace(parameters=tuple(sig.parameters.values())[1:]) + + if isinstance(obj, types.BuiltinFunctionType): + # Raise a nicer error message for builtins + msg = 'no signature found for builtin function {0!r}'.format(obj) + raise ValueError(msg) + + raise ValueError('callable {0!r} is not supported by signature'.format(obj)) + + +class _void(object): + '''A private marker - used in Parameter & Signature''' + + +class _empty(object): + pass + + +class _ParameterKind(int): + def __new__(self, *args, **kwargs): + obj = int.__new__(self, *args) + obj._name = kwargs['name'] + return obj + + def __str__(self): + return self._name + + def __repr__(self): + return '<_ParameterKind: {0!r}>'.format(self._name) + + +_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY') +_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD') +_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL') +_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY') +_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD') + + +class Parameter(object): + '''Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is not set. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is not set. + * kind : str + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. 
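+
+    Illustrative sketch (editor's addition, not from upstream):
+
+    >>> param = Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=1)
+    >>> str(param)
+    'x=1'
+    >>> param.replace(default=2).default
+    2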
+ ''' + + __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, default=_empty, annotation=_empty, + _partial_kwarg=False): + + if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD, + _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD): + raise ValueError("invalid value for 'Parameter.kind' attribute") + self._kind = kind + + if default is not _empty: + if kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{0} parameters cannot have default values'.format(kind) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is None: + if kind != _POSITIONAL_ONLY: + raise ValueError("None is not a valid name for a " + "non-positional-only parameter") + self._name = name + else: + name = str(name) + if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I): + msg = '{0!r} is not a valid parameter name'.format(name) + raise ValueError(msg) + self._name = name + + self._partial_kwarg = _partial_kwarg + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, name=_void, kind=_void, annotation=_void, + default=_void, _partial_kwarg=_void): + '''Creates a customized copy of the Parameter.''' + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + if _partial_kwarg is _void: + _partial_kwarg = self._partial_kwarg + + return type(self)(name, kind, default=default, annotation=annotation, + _partial_kwarg=_partial_kwarg) + + def __str__(self): + kind = self.kind + + formatted = self._name + if kind == _POSITIONAL_ONLY: + if formatted is None: + formatted = '' + formatted = '<{0}>'.format(formatted) + + # Add annotation and default value + if self._annotation is not _empty: + formatted = '{0}:{1}'.format(formatted, + formatannotation(self._annotation)) + + if self._default is not _empty: + formatted = '{0}={1}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + def __repr__(self): + return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__, + id(self), self.name) + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + return (issubclass(other.__class__, Parameter) and + self._name == other._name and + self._kind == other._kind and + self._default == other._default and + self._annotation == other._annotation) + + def __ne__(self, other): + return not self.__eq__(other) + + +class BoundArguments(object): + '''Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : OrderedDict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. 
+ * kwargs : dict + Dict of keyword arguments values. + ''' + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + # Keyword arguments mapped by 'functools.partial' + # (Parameter._partial_kwarg is True) are mapped + # in 'BoundArguments.kwargs', along with VAR_KEYWORD & + # KEYWORD_ONLY + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or + param._partial_kwarg): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + return (issubclass(other.__class__, BoundArguments) and + self.signature == other.signature and + self.arguments == other.arguments) + + def __ne__(self, other): + return not self.__eq__(other) + + +class Signature(object): + '''A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is not set. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. + * bind_partial(*args, **kwargs) -> BoundArguments + Creates a partial mapping from positional and keyword arguments + to parameters (simulating 'functools.partial' behavior.) + ''' + + __slots__ = ('_return_annotation', '_parameters') + + _parameter_cls = Parameter + _bound_arguments_cls = BoundArguments + + empty = _empty + + def __init__(self, parameters=None, return_annotation=_empty, + __validate_parameters__=True): + '''Constructs Signature from the given list of Parameter + objects and 'return_annotation'. All arguments are optional. 
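+
+        Illustrative sketch (editor's addition, not from upstream):
+
+        >>> a = Parameter('a', Parameter.POSITIONAL_OR_KEYWORD)
+        >>> b = Parameter('b', Parameter.POSITIONAL_OR_KEYWORD, default=0)
+        >>> str(Signature([a, b]))
+        '(a, b=0)'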
+ ''' + + if parameters is None: + params = OrderedDict() + else: + if __validate_parameters__: + params = OrderedDict() + top_kind = _POSITIONAL_ONLY + + for idx, param in enumerate(parameters): + kind = param.kind + if kind < top_kind: + msg = 'wrong parameter order: {0} before {1}' + msg = msg.format(top_kind, param.kind) + raise ValueError(msg) + else: + top_kind = kind + + name = param.name + if name is None: + name = str(idx) + param = param.replace(name=name) + + if name in params: + msg = 'duplicate parameter name: {0!r}'.format(name) + raise ValueError(msg) + params[name] = param + else: + params = OrderedDict(((param.name, param) + for param in parameters)) + + self._parameters = params + self._return_annotation = return_annotation + + @classmethod + def from_function(cls, func): + '''Constructs Signature for the given python function''' + + if not isinstance(func, types.FunctionType): + raise TypeError('{0!r} is not a Python function'.format(func)) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + positional = tuple(arg_names[:pos_count]) + keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0) + keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)] + annotations = getattr(func, '__annotations__', {}) + defaults = func.__defaults__ + kwdefaults = getattr(func, '__kwdefaults__', None) + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + # Non-keyword-only parameters w/o defaults. + non_default_count = pos_count - pos_default_count + for name in positional[:non_default_count]: + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD)) + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_POSITIONAL_OR_KEYWORD, + default=defaults[offset])) + + # *args + if func_code.co_flags & 0x04: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. + for name in keyword_only: + default = _empty + if kwdefaults is not None: + default = kwdefaults.get(name, _empty) + + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_KEYWORD_ONLY, + default=default)) + # **kwargs + if func_code.co_flags & 0x08: + index = pos_count + keyword_only_count + if func_code.co_flags & 0x04: + index += 1 + + name = arg_names[index] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_KEYWORD)) + + return cls(parameters, + return_annotation=annotations.get('return', _empty), + __validate_parameters__=False) + + @property + def parameters(self): + try: + return types.MappingProxyType(self._parameters) + except AttributeError: + return OrderedDict(self._parameters.items()) + + @property + def return_annotation(self): + return self._return_annotation + + def replace(self, parameters=_void, return_annotation=_void): + '''Creates a customized copy of the Signature. + Pass 'parameters' and/or 'return_annotation' arguments + to override them in the new copy. 
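+
+        Illustrative sketch (editor's addition, not from upstream):
+
+        >>> def func(a, b=2): pass
+        >>> sig = signature(func)
+        >>> str(sig.replace(return_annotation=int))
+        '(a, b=2) -> int'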
+ ''' + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + def __hash__(self): + msg = "unhashable type: '{0}'".format(self.__class__.__name__) + raise TypeError(msg) + + def __eq__(self, other): + if (not issubclass(type(other), Signature) or + self.return_annotation != other.return_annotation or + len(self.parameters) != len(other.parameters)): + return False + + other_positions = dict((param, idx) + for idx, param in enumerate(other.parameters.keys())) + + for idx, (param_name, param) in enumerate(self.parameters.items()): + if param.kind == _KEYWORD_ONLY: + try: + other_param = other.parameters[param_name] + except KeyError: + return False + else: + if param != other_param: + return False + else: + try: + other_idx = other_positions[param_name] + except KeyError: + return False + else: + if (idx != other_idx or + param != other.parameters[param_name]): + return False + + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def _bind(self, args, kwargs, partial=False): + '''Private method. Don't use directly.''' + + arguments = OrderedDict() + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + if partial: + # Support for binding arguments to 'functools.partial' objects. + # See 'functools.partial' case in 'signature()' implementation + # for details. + for param_name, param in self.parameters.items(): + if (param._partial_kwarg and param_name not in kwargs): + # Simulating 'functools.partial' behavior + kwargs[param_name] = param.default + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + msg = '{arg!r} parameter is positional only, ' \ + 'but was passed as a keyword' + msg = msg.format(arg=param.name) + raise TypeError(msg) + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, let's start parsing `kwargs`, starting + # with the current parameter + parameters_ex = (param,) + break + else: + if partial: + parameters_ex = (param,) + break + else: + msg = '{arg!r} parameter lacking default value' + msg = msg.format(arg=param.name) + raise TypeError(msg) + else: + # We have a positional argument to process + try: + param = next(parameters) + except StopIteration: + raise TypeError('too many positional arguments') + else: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + # Looks like we have no parameter for this positional + # argument + raise TypeError('too many positional arguments') + + if param.kind == _VAR_POSITIONAL: + # We have an '*args'-like argument, let's fill it with + # all positional arguments we have left and move on to + # the next phase + values = [arg_val] + values.extend(arg_vals) + arguments[param.name] = tuple(values) + break + + if param.name in kwargs: + raise TypeError('multiple values for argument ' + '{arg!r}'.format(arg=param.name)) + + arguments[param.name] = arg_val + + # Now, we iterate through the remaining parameters to process + # keyword arguments + kwargs_param = None + for param in itertools.chain(parameters_ex, parameters): + if param.kind == _POSITIONAL_ONLY: + # This should never happen in case of a properly built + # Signature object (but let's have this check here + # to ensure correct behaviour just in case) + raise TypeError('{arg!r} parameter is positional only, ' + 'but was passed as a keyword'. \ + format(arg=param.name)) + + if param.kind == _VAR_KEYWORD: + # Memorize that we have a '**kwargs'-like parameter + kwargs_param = param + continue + + param_name = param.name + try: + arg_val = kwargs.pop(param_name) + except KeyError: + # We have no value for this parameter. It's fine though, + # if it has a default value, or it is an '*args'-like + # parameter, left alone by the processing of positional + # arguments. + if (not partial and param.kind != _VAR_POSITIONAL and + param.default is _empty): + raise TypeError('{arg!r} parameter lacking default value'. \ + format(arg=param_name)) + + else: + arguments[param_name] = arg_val + + if kwargs: + if kwargs_param is not None: + # Process our '**kwargs'-like parameter + arguments[kwargs_param.name] = kwargs + else: + raise TypeError('too many keyword arguments %r' % kwargs) + + return self._bound_arguments_cls(self, arguments) + + def bind(*args, **kwargs): + '''Get a BoundArguments object, that maps the passed `args` + and `kwargs` to the function's signature. Raises `TypeError` + if the passed arguments can not be bound. + ''' + return args[0]._bind(args[1:], kwargs) + + def bind_partial(self, *args, **kwargs): + '''Get a BoundArguments object, that partially maps the + passed `args` and `kwargs` to the function's signature. + Raises `TypeError` if the passed arguments can not be bound.
+ ''' + return self._bind(args, kwargs, partial=True) + + def __str__(self): + result = [] + render_kw_only_separator = True + for idx, param in enumerate(self.parameters.values()): + formatted = str(param) + + kind = param.kind + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + rendered = '({0})'.format(', '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation) + rendered += ' -> {0}'.format(anno) + + return rendered diff --git a/ext/funcsigs/version.py b/ext/funcsigs/version.py new file mode 100644 index 0000000000..7863915fa5 --- /dev/null +++ b/ext/funcsigs/version.py @@ -0,0 +1 @@ +__version__ = "1.0.2" diff --git a/ext/imdbpie/__init__.py b/ext/imdbpie/__init__.py index 74e53f151d..97ee5cd965 100644 --- a/ext/imdbpie/__init__.py +++ b/ext/imdbpie/__init__.py @@ -1 +1,3 @@ +# -*- coding: utf-8 -*- from .imdbpie import Imdb +from .exceptions import ImdbAPIError diff --git a/ext/imdbpie/auth.py b/ext/imdbpie/auth.py new file mode 100644 index 0000000000..ccb8a733aa --- /dev/null +++ b/ext/imdbpie/auth.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import base64 +import json +import requests +import tempfile +from datetime import datetime +try: + from base64 import encodebytes +except ImportError: + from base64 import encodestring as encodebytes + +import diskcache +from dateutil.tz import tzutc +from dateutil.parser import parse +import boto.utils +from six.moves.urllib.parse import urlparse, parse_qs, quote +from boto import provider +from boto.connection import HTTPRequest +from boto.auth import HmacAuthV3HTTPHandler + +from .constants import APP_KEY, HOST, USER_AGENT, BASE_URI + + +class ZuluHmacAuthV3HTTPHandler(HmacAuthV3HTTPHandler): + + def sign_string(self, string_to_sign): + new_hmac = self._get_hmac() + new_hmac.update(string_to_sign) + return encodebytes(new_hmac.digest()).decode('utf-8').strip() + + def headers_to_sign(self, http_request): + headers_to_sign = {'Host': self.host} + for name, value in http_request.headers.items(): + lname = name.lower() + if lname.startswith('x-amz'): + headers_to_sign[name] = value + return headers_to_sign + + def canonical_query_string(self, http_request): + if http_request.method == 'POST': + return '' + qs_parts = [] + for param in sorted(http_request.params): + value = boto.utils.get_utf8_value(http_request.params[param]) + param_ = quote(param, safe='-_.~') + value_ = quote(value, safe='-_.~') + qs_parts.append('{0}={1}'.format(param_, value_)) + return '&'.join(qs_parts) + + def string_to_sign(self, http_request): + headers_to_sign = self.headers_to_sign(http_request) + canonical_qs = self.canonical_query_string(http_request) + canonical_headers = self.canonical_headers(headers_to_sign) + string_to_sign = '\n'.join(( + http_request.method, + http_request.path, + canonical_qs, + canonical_headers, + '', + http_request.body + )) + return string_to_sign, headers_to_sign + + +def _get_credentials(): + url = 
'{0}/authentication/credentials/temporary/ios82'.format(BASE_URI) + response = requests.post( + url, json={'appKey': APP_KEY}, headers={'User-Agent': USER_AGENT} + ) + response.raise_for_status() + return json.loads(response.content.decode('utf8'))['resource'] + + +class Auth(object): + + SOON_EXPIRES_SECONDS = 60 + _CREDS_STORAGE_KEY = 'imdbpie-credentials' + + def __init__(self, creds=None): + self._cachedir = tempfile.gettempdir() + + def _get_creds(self): + with diskcache.Cache(directory=self._cachedir) as cache: + return cache.get(self._CREDS_STORAGE_KEY) + + def _set_creds(self, creds): + with diskcache.Cache(directory=self._cachedir) as cache: + cache[self._CREDS_STORAGE_KEY] = creds + return creds + + def clear_cached_credentials(self): + with diskcache.Cache(directory=self._cachedir) as cache: + cache.delete(self._CREDS_STORAGE_KEY) + + def _creds_soon_expiring(self): + creds = self._get_creds() + if not creds: + return creds, True + expires_at = parse(creds['expirationTimeStamp']) + now = datetime.now(tzutc()) + if now < expires_at: + time_diff = expires_at - now + if time_diff.total_seconds() < self.SOON_EXPIRES_SECONDS: + # creds will soon expire, so renew them + return creds, True + return creds, False + else: + return creds, True + + def get_auth_headers(self, url_path): + creds, soon_expires = self._creds_soon_expiring() + if soon_expires: + creds = self._set_creds(creds=_get_credentials()) + + handler = ZuluHmacAuthV3HTTPHandler( + host=HOST, + config={}, + provider=provider.Provider( + name='aws', + access_key=creds['accessKeyId'], + secret_key=creds['secretAccessKey'], + security_token=creds['sessionToken'], + ) + ) + parsed_url = urlparse(url_path) + params = { + key: val[0] for key, val in parse_qs(parsed_url.query).items() + } + request = HTTPRequest( + method='GET', protocol='https', host=HOST, + port=443, path=parsed_url.path, auth_path=None, params=params, + headers={}, body='' + ) + handler.add_auth(req=request) + headers = request.headers + headers['User-Agent'] = USER_AGENT + return headers diff --git a/ext/imdbpie/constants.py b/ext/imdbpie/constants.py index ecaab24740..5074b827f4 100644 --- a/ext/imdbpie/constants.py +++ b/ext/imdbpie/constants.py @@ -1,68 +1,10 @@ +# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import hashlib -BASE_URI = 'https://app.imdb.com' -API_KEY = '2wex6aeu6a8q9e49k7sfvufd6rhh0n' -SHA1_KEY = hashlib.sha1(API_KEY.encode('utf8')).hexdigest() -USER_AGENTS = ( - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) ' - 'AppleWebKit/534.46 (KHTML, like Gecko) Mobile/9A334', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 ' - '(KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X) AppleWebKit/' - '534.46 (KHTML, like Gecko) Mobile/9A405', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X) AppleWebKit/' - '534.46 (KHTML, like Gecko) Mobile/9A406', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X) AppleWebKit/' - '534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A405 Safari/7534.48.3', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X) AppleWebKit/' - '534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A406 Safari/7534.48.3', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/' - '534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534' - '.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 
Safari/7534.48.3', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26' - ' (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_2 like Mac OS X) AppleWebKit/53' - '6.26 (KHTML, like Gecko) Version/6.0 Mobile/10B146 Safari/8536.25', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_3 like Mac OS X) AppleWebKit/' - '536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_1_4 like Mac OS X) AppleWebKit/' - '536.26 (KHTML, like Gecko) Version/6.0 Mobile/10B350 Safari/8536.25', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/' - '537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/' - '537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_4 like Mac OS X) AppleWebKit/' - '537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B554a Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_6 like Mac OS X) AppleWebKit/' - '537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B651 Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) AppleWebKit/' - '537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D167 Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 7_1_1 like Mac OS X) AppleWebKit/' - '537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D201 Safari/9537.53', - 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0_2 like Mac OS X) AppleWebKit/' - '600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A366 Safari/600.1.4' - 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.4' - ' (KHTML, like Gecko) Version/8.0 Mobile/12A366 Safari/600.1.4', - 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebK' - 'it/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16', - 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebK' - 'it/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7', - 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_2 like Mac OS X; en-us) AppleWe' - 'bKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 ' - 'Mobile/8H7 Safari/6533.18.5', - 'Mozilla/5.0 (iPod; CPU iPhone OS 6_1_3 like Mac OS X) AppleWebKit/536.26 ' - '(KHTML, like Gecko) Version/6.0 Mobile/10B329 Safari/8536.25', - 'Mozilla/5.0 (iphone; U; CPU iPhone OS 4_3_5 like Mac OS X; zh-cn) AppleWe' - 'bKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 ' - 'Mobile/8J2 Safari/6533.18.5', - 'Mozilla/5.0 (iphone; cpu iphone os 7_0_2 like mac os x) Applewebkit/537.5' - '1.1 (khtml, like gecko) version/7.0 mobile/11a501 safari/9537.53', - 'Mozilla/5.0(iPhone; U; CPU iPhone OS 4_1 like Mac OS X; en-us)AppleWebKit' - '/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8B5097d Safari/6531.22.7', - 'Mozilla/5.0(iPhone;U;CPUiPhoneOS4_0likeMacOSX;en-us)AppleWebKit/532.9(KHT' - 'ML,likeGecko)Version/4.0.5Mobile/8A293Safari/6531.22.7' -) -DEFAULT_PROXY_URI = 'http://openwebproxy.pw/browse.php?u={0}' +HOST = 'api.imdbws.com' +BASE_URI = 'https://{0}'.format(HOST) +SEARCH_BASE_URI = 'https://v2.sg.media-imdb.com' +USER_AGENT = 'IMDb/8.3.1 (iPhone9,4; iOS 11.2.1)' +APP_KEY = '76a6cc20-6073-4290-8a2c-951b4580ae4a' diff --git a/ext/imdbpie/exceptions.py b/ext/imdbpie/exceptions.py index 36a7bcd7e4..c6026af13b 100644 --- a/ext/imdbpie/exceptions.py +++ b/ext/imdbpie/exceptions.py @@ -1 +1,3 @@ -from requests.exceptions import HTTPError +# -*- coding: utf-8 -*- +class 
ImdbAPIError(Exception): + pass diff --git a/ext/imdbpie/imdbpie.py b/ext/imdbpie/imdbpie.py index 6b8a8f9db9..101aeb5705 100644 --- a/ext/imdbpie/imdbpie.py +++ b/ext/imdbpie/imdbpie.py @@ -1,7 +1,9 @@ +# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import re import json +import tempfile import time import random import logging @@ -9,93 +11,129 @@ import warnings import requests -from cachecontrol import CacheControl -from cachecontrol.caches import FileCache -from six.moves import html_parser +from requests.exceptions import HTTPError +from six import text_type from six.moves import http_client as httplib -from six.moves.urllib.parse import urlencode, quote, quote_plus - -from imdbpie.objects import Image, Title, Person, Episode, Review -from imdbpie.constants import ( - BASE_URI, SHA1_KEY, USER_AGENTS, DEFAULT_PROXY_URI +from six.moves.urllib.parse import ( + urlencode, quote, quote_plus, unquote, urlparse ) +from .constants import BASE_URI, HOST, SEARCH_BASE_URI +from .auth import Auth +from .exceptions import ImdbAPIError + logger = logging.getLogger(__name__) -class Imdb(object): +class Imdb(Auth): - def __init__(self, api_key=None, locale=None, anonymize=False, - exclude_episodes=False, user_agent=None, cache=None, - proxy_uri=None, verify_ssl=True, session=None): - self.api_key = api_key or SHA1_KEY - self.timestamp = time.mktime(datetime.date.today().timetuple()) - self.user_agent = user_agent or random.choice(USER_AGENTS) + def __init__(self, locale=None, exclude_episodes=False, session=None): self.locale = locale or 'en_US' self.exclude_episodes = exclude_episodes - self.caching_enabled = True if cache is True else False - self.proxy_uri = proxy_uri or DEFAULT_PROXY_URI - self.anonymize = anonymize - self.verify_ssl = verify_ssl self.session = session or requests.Session() + self._cachedir = tempfile.gettempdir() + + def get_name(self, imdb_id): + logger.info('getting name {0}'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + return self._get_resource('/name/{0}/fulldetails'.format(imdb_id)) + + def get_name_filmography(self, imdb_id): + logger.info('getting name {0} filmography'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + return self._get_resource('/name/{0}/filmography'.format(imdb_id)) - if self.caching_enabled: - warnings.warn('caching will be removed in version 5.0.0 ' - 'due to not being thread safe') - self.session = CacheControl( - self.session, cache=FileCache('.imdbpie_cache') + def get_title(self, imdb_id): + logger.info('getting title {0}'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + try: + resource = self._get_resource( + '/title/{0}/auxiliary'.format(imdb_id) ) + except LookupError: + self._title_not_found() - def get_person_by_id(self, imdb_id): + if ( + self.exclude_episodes is True and + resource['base']['titleType'] == 'tvEpisode' + ): + raise LookupError( + 'Title not found. 
Title was an episode and ' + '"exclude_episodes" is set to true' + ) + return resource + + def get_title_credits(self, imdb_id): + logger.info('getting title {0} credits'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/name/maindetails', {'nconst': imdb_id}) - response = self._get(url) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/fullcredits'.format(imdb_id)) - if response is None or self._is_redirection_result(response): - return None + def get_title_quotes(self, imdb_id): + logger.info('getting title {0} quotes'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/quotes'.format(imdb_id)) - person = Person(response["data"]) - return person + def get_title_ratings(self, imdb_id): + logger.info('getting title {0} ratings'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/ratings'.format(imdb_id)) - def get_title_by_id(self, imdb_id): + def get_title_genres(self, imdb_id): + logger.info('getting title {0} genres'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/title/maindetails', {'tconst': imdb_id}) - response = self._get(url) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/genres'.format(imdb_id)) - if response is None or self._is_redirection_result(response): - return None + def get_title_similarities(self, imdb_id): + logger.info('getting title {0} similarities'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/similarities'.format(imdb_id)) - # get the full cast information, add key if not present - response['data']['credits'] = self._get_credits_data(imdb_id) - response['data']['plots'] = self.get_title_plots(imdb_id) + def get_title_awards(self, imdb_id): + logger.info('getting title {0} awards'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/awards'.format(imdb_id)) - if ( - self.exclude_episodes is True and - response['data'].get('type') == 'tv_episode' - ): - return None + def get_title_connections(self, imdb_id): + logger.info('getting title {0} connections'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/connections'.format(imdb_id)) - title = Title(data=response['data']) - return title + def get_title_releases(self, imdb_id): + logger.info('getting title {0} releases'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/releases'.format(imdb_id)) - def get_title_plots(self, imdb_id): + def get_title_versions(self, imdb_id): + logger.info('getting title {0} versions'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/title/plot', {'tconst': imdb_id}) - response = self._get(url) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/versions'.format(imdb_id)) - if response['data']['tconst'] != imdb_id: # pragma: no cover - return [] + def get_title_plot(self, imdb_id): + logger.info('getting title {0} plot'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/plot'.format(imdb_id)) - plots = response['data'].get('plots', []) - return [plot.get('text') for plot 
in plots] + def get_title_plot_synopsis(self, imdb_id): + logger.info('getting title {0} plot synopsis'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/plotsynopsis'.format(imdb_id)) def title_exists(self, imdb_id): self.validate_imdb_id(imdb_id) page_url = 'http://www.imdb.com/title/{0}/'.format(imdb_id) - if self.anonymize is True: - page_url = self.proxy_uri.format(quote(page_url)) - response = self.session.head(page_url) if response.status_code == httplib.OK: @@ -108,12 +146,16 @@ def title_exists(self, imdb_id): else: response.raise_for_status() - def search_for_person(self, name): + def search_for_name(self, name): + logger.info('searching for name {0}'.format(name)) + name = re.sub(r'\W+', '_', name).strip('_') query = quote(name) - url = 'https://v2.sg.media-imdb.com/suggests/{0}/{1}.json'.format( - query[0].lower(), query + first_alphanum_char = self._query_first_alpha_num(name) + url = ( + '{0}/suggests/{1}/{2}.json'.format(SEARCH_BASE_URI, + first_alphanum_char, query) ) - search_results = self._get(url) + search_results = self._get(url=url, query=query) results = [] for result in search_results.get('d', ()): if not result['id'].startswith('nm'): @@ -127,133 +169,93 @@ def search_for_person(self, name): return results def search_for_title(self, title): + logger.info('searching for title {0}'.format(title)) + title = re.sub(r'\W+', '_', title).strip('_') query = quote(title) - url = 'https://v2.sg.media-imdb.com/suggests/{0}/{1}.json'.format( - query[0].lower(), query + first_alphanum_char = self._query_first_alpha_num(title) + url = ( + '{0}/suggests/{1}/{2}.json'.format(SEARCH_BASE_URI, + first_alphanum_char, query) ) - search_results = self._get(url) + search_results = self._get(url=url, query=query) results = [] for result in search_results.get('d', ()): result_item = { 'title': result['l'], - 'year': str(result.get('y')) if result.get('y') else None, + 'year': text_type(result['y']) if result.get('y') else None, 'imdb_id': result['id'], 'type': result.get('q'), } results.append(result_item) return results - def top_250(self): - url = self._build_url('/chart/top', {}) - response = self._get(url) - return response['data']['list']['list'] + def get_popular_titles(self): + return self._get_resource('/chart/titlemeter') - def popular_shows(self): - url = self._build_url('/chart/tv', {}) - response = self._get(url) - return response['data']['list'] + def get_popular_shows(self): + return self._get_resource('/chart/tvmeter') - def popular_movies(self): - url = self._build_url('/chart/moviemeter', {}) - response = self._get(url) - return response['data']['list'] + def get_popular_movies(self): + return self._get_resource('/chart/moviemeter') def get_title_images(self, imdb_id): + logger.info('getting title {0} images'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/title/photos', {'tconst': imdb_id}) - response = self._get(url) - return self._get_images(response) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/images'.format(imdb_id)) - def get_title_reviews(self, imdb_id, max_results=None): - """Retrieve reviews for a title ordered by 'Best' descending""" + def get_title_videos(self, imdb_id): + logger.info('getting title {0} videos'.format(imdb_id)) self.validate_imdb_id(imdb_id) - user_comments = self._get_reviews_data( - imdb_id, - max_results=max_results - ) + self._redirection_title_check(imdb_id) + return 
self._get_resource('/title/{0}/videos'.format(imdb_id)) - if not user_comments: - return None - - title_reviews = [] - - for review_data in user_comments: - title_reviews.append(Review(review_data)) - return title_reviews - - def get_person_images(self, imdb_id): + def get_title_user_reviews(self, imdb_id): + logger.info('getting title {0} reviews'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/name/photos', {'nconst': imdb_id}) - response = self._get(url) - return self._get_images(response) + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/userreviews'.format(imdb_id)) - def get_episodes(self, imdb_id): + def get_title_metacritic_reviews(self, imdb_id): + logger.info('getting title {0} metacritic reviews'.format(imdb_id)) self.validate_imdb_id(imdb_id) - if self.exclude_episodes: - raise ValueError('exclude_episodes is currently set') - - title = self.get_title_by_id(imdb_id) - if title.type != 'tv_series': - raise RuntimeError('Title provided is not of type TV Series') + self._redirection_title_check(imdb_id) + return self._get_resource('/title/{0}/metacritic'.format(imdb_id)) - url = self._build_url('/title/episodes', {'tconst': imdb_id}) - response = self._get(url) - - if response is None: - return None - - seasons = response.get('data').get('seasons') - episodes = [] - - for season in seasons: - season_number = season.get('token') - for idx, episode_data in enumerate(season.get('list')): - episode_data['series_name'] = title.title - episode_data['episode'] = idx + 1 - episode_data['season'] = season_number - e = Episode(episode_data) - episodes.append(e) - - return episodes - - def _get_credits_data(self, imdb_id): + def get_name_images(self, imdb_id): + logger.info('getting name {0} images'.format(imdb_id)) self.validate_imdb_id(imdb_id) - url = self._build_url('/title/fullcredits', {'tconst': imdb_id}) - response = self._get(url) + return self._get_resource('/name/{0}/images'.format(imdb_id)) - if response is None: - return None - - return response.get('data').get('credits') - - def _get_reviews_data(self, imdb_id, max_results=None): + def get_name_videos(self, imdb_id): + logger.info('getting name {0} videos'.format(imdb_id)) self.validate_imdb_id(imdb_id) - params = {'tconst': imdb_id} - if max_results: - params['limit'] = max_results - url = self._build_url('/title/usercomments', params) - response = self._get(url) - - if response is None: - return None + return self._get_resource('/name/{0}/videos'.format(imdb_id)) - return response.get('data').get('user_comments') - - def _get_images(self, response): - images = [] - - for image_data in response.get('data').get('photos', []): - images.append(Image(image_data)) - - return images + def get_title_episodes(self, imdb_id): + logger.info('getting title {0} episodes'.format(imdb_id)) + self.validate_imdb_id(imdb_id) + if self.exclude_episodes: + raise ValueError('exclude_episodes is currently set to true') + return self._get_resource('/title/{0}/episodes'.format(imdb_id)) @staticmethod def _cache_response(file_path, resp): with open(file_path, 'w+') as f: json.dump(resp, f) - def _parse_dirty_json(self, data): - match_json_within_dirty_json = r'imdb\$[\w_]+\({1}(.+)\){1}' + def _parse_dirty_json(self, data, query=None): + # the suggests search endpoint answers with a JSONP-style wrapper such as imdb$query({...}); the patterns below strip it to recover the JSON body + if query is None: + match_json_within_dirty_json = r'imdb\$.+\({1}(.+)\){1}' + else: + query_match = ''.join( + char if char.isalnum() else '[{0}]'.format(char) + for char in unquote(query) + ) + query_match = query_match.replace('[ ]', '.+') +
match_json_within_dirty_json = ( + r'imdb\${}\((.+)\)'.format(query_match) + ) data_clean = re.match( match_json_within_dirty_json, data, re.IGNORECASE ).groups()[0] @@ -267,46 +269,6 @@ def validate_imdb_id(imdb_id): except (AttributeError, TypeError): raise ValueError('invalid imdb id') - def _get(self, url): - resp = self.session.get( - url, - headers={'User-Agent': self.user_agent}, - verify=self.verify_ssl - ) - - resp.raise_for_status() - resp_data = resp.content.decode('utf-8') - try: - resp_dict = json.loads(resp_data) - except ValueError: - resp_dict = self._parse_dirty_json(resp_data) - - if resp_dict.get('error'): - return None - - return resp_dict - - def _build_url(self, path, params): - default_params = { - 'api': 'v1', - 'appid': 'iphone1_1', - 'apiPolicy': 'app1_1', - 'apiKey': self.api_key, - 'locale': self.locale, - 'timestamp': self.timestamp - } - - query_params = dict( - list(default_params.items()) + list(params.items()) - ) - query_params = urlencode(query_params) - url = '{base}{path}?{params}'.format(base=BASE_URI, - path=path, params=query_params) - - if self.anonymize is True: - return self.proxy_uri.format(quote(url)) - return url - @staticmethod def _is_redirection_result(response): """ @@ -320,3 +282,60 @@ def _is_redirection_result(response): ): return True return False + + def _get_resource(self, path): + url = '{0}{1}'.format(BASE_URI, path) + return self._get(url=url)['resource'] + + def _get(self, url, query=None): + path = urlparse(url).path + headers = {'Accept-Language': self.locale} + headers.update(self.get_auth_headers(path)) + resp = self.session.get(url, headers=headers) + + if not resp.ok: + if resp.status_code == httplib.NOT_FOUND: + raise LookupError('Resource {0} not found'.format(path)) + else: + msg = '{0} {1}'.format(resp.status_code, resp.text) + raise ImdbAPIError(msg) + resp_data = resp.content.decode('utf-8') + try: + resp_dict = json.loads(resp_data) + except ValueError: + resp_dict = self._parse_dirty_json( + data=resp_data, query=query + ) + + if resp_dict.get('error'): + return None + + return resp_dict + + def _redirection_title_check(self, imdb_id): + if self.is_redirection_title(imdb_id): + self._title_not_found( + msg='{0} is a redirection imdb id'.format(imdb_id) + ) + + def is_redirection_title(self, imdb_id): + self.validate_imdb_id(imdb_id) + page_url = 'http://www.imdb.com/title/{0}/'.format(imdb_id) + response = self.session.head(page_url) + if response.status_code == httplib.MOVED_PERMANENTLY: + return True + else: + return False + + def _query_first_alpha_num(self, query): + for char in query.lower(): + if char.isalnum(): + return char + raise ValueError( + 'invalid query, does not contain any alphanumeric characters' + ) + + def _title_not_found(self, msg=''): + if msg: + msg = ' {0}'.format(msg) + raise LookupError('Title not found.{0}'.format(msg)) diff --git a/ext/imdbpie/objects.py b/ext/imdbpie/objects.py deleted file mode 100644 index f965a5cbdf..0000000000 --- a/ext/imdbpie/objects.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import absolute_import, unicode_literals - - -class Person(object): - - def __init__(self, data): - # primary attributes that should be set in all cases - - self.name = self._extract_name(data) - self.imdb_id = self._extract_imdb_id(data) - self.photo_url = self._extract_photo_url(data) - - # secondary attribs, will only get data when called via get_title_by_id - - # token and label are the persons categorisation - # e.g token: writers label: Series writing credits - self.token = 
data.get('token') - self.label = data.get('label') - # attr is a note about this person's work - # e.g. (1990 - 1992 20 episodes) - self.attr = data.get('attr') - # other primary information about their part - self.roles = ( - data.get('char').split('/') if data.get('char') else [] - ) - self.job = data.get('job') - - @staticmethod - def _extract_name(data): - # Person object can be given response of get_title_by_id - # or get_person_by_id call. - # This function covers the slight data structure differences - # to extract the name - name = data.get('name') - if isinstance(name, dict): - return name.get('name') - return name - - @staticmethod - def _extract_imdb_id(data): - name = data.get('name') - if isinstance(name, dict): - return name.get('nconst') - return data.get('nconst') - - @staticmethod - def _extract_photo_url(data): - photo_url = data.get('image', {}).get('url') - return photo_url - - def __repr__(self): - return '<Person: {0} ({1})>'.format(repr(self.name), - repr(self.imdb_id)) - - def __unicode__(self): - return '<Person: {0} ({1})>'.format(self.name.encode('utf-8'), - self.imdb_id) - - class Title(object): - - def __init__(self, data): - self.imdb_id = data.get('tconst') - self.title = data.get('title') - self.type = data.get('type') - self.year = self._extract_year(data) - self.tagline = data.get('tagline') - self.plots = data.get('plots') - self.plot_outline = data.get('plot', {}).get('outline') - self.rating = data.get('rating') - self.genres = data.get('genres') - self.votes = data.get('num_votes') - self.runtime = data.get('runtime', {}).get('time') - self.poster_url = data.get('image', {}).get('url') - self.cover_url = self._extract_cover_url(data) - self.release_date = data.get('release_date', {}).get('normal') - self.certification = data.get('certificate', {}).get( - 'certificate') - self.trailer_image_urls = self._extract_trailer_image_urls(data) - self.directors_summary = self._extract_directors_summary(data) - self.creators = self._extract_creators(data) - self.cast_summary = self._extract_cast_summary(data) - self.writers_summary = self._extract_writers_summary(data) - self.credits = self._extract_credits(data) - self.trailers = self._extract_trailers(data) - - def _extract_directors_summary(self, data): - return [Person(p) for p in data.get('directors_summary', [])] - - def _extract_creators(self, data): - return [Person(p) for p in data.get('creators', [])] - - def _extract_trailers(self, data): - def build_dict(val): - return {'url': val['url'], 'format': val['format']} - - trailers = data.get('trailer', {}).get('encodings', {}).values() - return [build_dict(trailer) for trailer in trailers] - - def _extract_writers_summary(self, data): - return [Person(p) for p in data.get('writers_summary', [])] - - def _extract_cast_summary(self, data): - return [Person(p) for p in data.get('cast_summary', [])] - - def _extract_credits(self, data): - credits = [] - - if not data.get('credits'): - return [] - - for credit_group in data['credits']: - """ - Possible tokens: directors, cast, writers, producers and others - """ - for person in credit_group['list']: - person_extra = { - 'token': credit_group.get('token'), - 'label': credit_group.get('label'), - 'job': person.get('job'), - 'attr': person.get('attr') - } - person_data = person.copy() - person_data.update(person_extra) - if 'name' in person_data.keys(): - # some 'special' credits such as script rewrites - # have different formatting.
- # we skip those here, losing some data due to this check - credits.append(Person(person_data)) - return credits - - def _extract_year(self, data): - year = data.get('year') - # if there's no year the API returns ????... - if not year or year == '????': - return None - return int(year) - - def _extract_cover_url(self, data): - if self.poster_url: - return '{0}_SX214_.jpg'.format(self.poster_url.replace('.jpg', '')) - - def _extract_trailer_image_urls(self, data): - slates = data.get('trailer', {}).get('slates', []) - return [s['url'] for s in slates] - - def __repr__(self): - return '<Title: {0} - {1}>'.format(repr(self.title), - repr(self.imdb_id)) - - def __unicode__(self): - return '<Title: {0} - {1}>'.format(self.title, self.imdb_id) - - class Image(object): - - def __init__(self, data): - self.caption = data.get('caption') - self.url = data.get('image', {}).get('url') - self.width = data.get('image', {}).get('width') - self.height = data.get('image', {}).get('height') - - def __repr__(self): - return '<Image: {0}>'.format(repr(self.caption)) - - def __unicode__(self): - return '<Image: {0}>'.format(self.caption.encode('utf-8')) - - class Episode(object): - - def __init__(self, data): - self.imdb_id = data.get('tconst') - self.release_date = data.get('release_date', {}).get('normal') - self.title = data.get('title') - self.series_name = data.get('series_name') - self.type = data.get('type') - self.year = self._extract_year(data) - self.season = self._extract_season_episode(data.get('season')) - self.episode = self._extract_season_episode(data.get('episode')) - - def _extract_season_episode(self, value): - return int(value) if value and value != 'unknown' else None - - def _extract_year(self, data): - year = data.get('year') - # if there's no year the API returns ????... - if not year or year == '????': - return None - return int(year) - - def __repr__(self): - return '<Episode: {0} - {1}>'.format(repr(self.title), - repr(self.imdb_id)) - - def __unicode__(self): - return '<Episode: {0} - {1}>'.format(self.title, self.imdb_id) - - class Review(object): - - def __init__(self, data): - self.username = data.get('user_name') - self.text = data.get('text') - self.date = data.get('date') - self.rating = data.get('user_rating') - self.summary = data.get('summary') - self.status = data.get('status') - self.user_location = data.get('user_location') - self.user_score = data.get('user_score') - self.user_score_count = data.get('user_score_count') - - def __repr__(self): - return '<Review: {0}>'.format(repr(self.text[:20])) - - def __unicode__(self): - return '<Review: {0}>'.format(self.text[:20].encode('utf-8')) diff --git a/ext/js2py/__init__.py b/ext/js2py/__init__.py index 7b71b8199d..b6c7bab455 100644 --- a/ext/js2py/__init__.py +++ b/ext/js2py/__init__.py @@ -64,8 +64,10 @@ __author__ = 'Piotr Dabkowski' __all__ = ['EvalJs', 'translate_js', 'import_js', 'eval_js', 'parse_js', 'translate_file', - 'run_file', 'disable_pyimport', 'eval_js6', 'translate_js6', 'PyJsException', 'get_file_contents', 'write_file_contents'] + 'run_file', 'disable_pyimport', 'eval_js6', 'translate_js6', 'PyJsException', 'get_file_contents', 'write_file_contents', 'require'] + from .base import PyJsException from .evaljs import * from .translators import parse as parse_js +from .node_import import require diff --git a/ext/js2py/host/jsfunctions.py b/ext/js2py/host/jsfunctions.py index 592b55c10d..f7cd815fc2 100644 --- a/ext/js2py/host/jsfunctions.py +++ b/ext/js2py/host/jsfunctions.py @@ -1,4 +1,6 @@ from ..base import * +from six.moves.urllib.parse import quote, unquote + RADIX_CHARS = {'1': 1, '0': 0, '3': 3, '2': 2, '5': 5, '4': 4, '7':
7, '6': 6, '9': 9, '8': 8, 'a': 10, 'c': 12, 'b': 11, 'e': 14, 'd': 13, 'g': 16, 'f': 15, 'i': 18, 'h': 17, 'k': 20, 'j': 19, 'm': 22, 'l': 21, @@ -79,7 +81,28 @@ def isFinite(number): return true -#todo URI handling! +# todo test them properly + +@Js +def escape(text): + return quote(text.to_string().value) + +@Js +def unescape(text): + return unquote(text.to_string().value) + +@Js +def encodeURI(text): + return quote(text.to_string().value, safe='~@#$&()*!+=:;,.?/\'') +@Js +def decodeURI(text): + return unquote(text.to_string().value) +@Js +def encodeURIComponent(text): + return quote(text.to_string().value, safe='~()*!.\'') +@Js +def decodeURIComponent(text): + return unquote(text.to_string().value) diff --git a/ext/js2py/node_import.py b/ext/js2py/node_import.py new file mode 100644 index 0000000000..c679310e2c --- /dev/null +++ b/ext/js2py/node_import.py @@ -0,0 +1,88 @@ +__all__ = ['require'] +import subprocess, os, codecs +from .evaljs import translate_js +import six +DID_INIT = False +DIRNAME = os.path.dirname(os.path.abspath(__file__)) +PY_NODE_MODULES_PATH = os.path.join(DIRNAME, 'py_node_modules') +def _init(): + global DID_INIT + if DID_INIT: + return + assert subprocess.call('node -v', shell=True, cwd=DIRNAME)==0, 'You must have node installed! run: brew install node' + assert subprocess.call('cd %s;npm install babel-core babel-cli babel-preset-es2015 babel-polyfill babelify browserify' % repr(DIRNAME), shell=True, cwd=DIRNAME)==0, 'Could not link required node_modules' + DID_INIT = True + +ADD_TO_GLOBALS_FUNC = ''' +;function addToGlobals(name, obj) { + if (!Object.prototype.hasOwnProperty('_fake_exports')) { + Object.prototype._fake_exports = {}; + } + Object.prototype._fake_exports[name] = obj; +}; + +''' +# subprocess.call("""node -e 'require("browserify")'""", shell=True) +GET_FROM_GLOBALS_FUNC = ''' +;function getFromGlobals(name) { + if (!Object.prototype.hasOwnProperty('_fake_exports')) { + throw Error("Could not find any value named "+name); + } + if (Object.prototype._fake_exports.hasOwnProperty(name)) { + return Object.prototype._fake_exports[name]; + } else { + throw Error("Could not find any value named "+name); + } +}; + +''' + +def require(module_name, include_polyfill=False, update=False): + assert isinstance(module_name, str), 'module_name must be a string!' 
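+    # A note on the flow below: translated modules are cached as .py files in
+    # py_node_modules; on a cache miss the module is installed with npm,
+    # bundled with browserify, transpiled to ES5 with babel, translated to
+    # Python via translate_js, and the result is written back to the cache.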
+ py_name = module_name.replace('-', '_') + module_filename = '%s.py'%py_name + cached_py_npm_modules = os.listdir(PY_NODE_MODULES_PATH) + if module_filename not in cached_py_npm_modules or update: + _init() + in_file_name = 'tmp0in439341018923js2py.js' + out_file_name = 'tmp0out439341018923js2py.js' + code = ADD_TO_GLOBALS_FUNC + if include_polyfill: + code += "\n;require('babel-polyfill');\n" + code += """ + var module_temp_love_python = require(%s); + addToGlobals(%s, module_temp_love_python); + """ % (repr(module_name), repr(module_name)) + with open(os.path.join(DIRNAME, in_file_name), 'wb') as f: + f.write(code.encode('utf-8') if six.PY3 else code) + + # make sure the module is installed + assert subprocess.call('cd %s;npm install %s' %(repr(DIRNAME), module_name), shell=True, cwd=DIRNAME)==0, 'Could not install the required module: ' + module_name + + # convert the module + assert subprocess.call( + '''node -e "(require('browserify')('./%s').bundle(function (err,data) {fs.writeFile('%s', require('babel-core').transform(data, {'presets': require('babel-preset-es2015')}).code, ()=>{});}))"''' % (in_file_name, out_file_name), + shell=True, + cwd=DIRNAME, + )==0, 'Error when converting module to the js bundle' + + os.remove(os.path.join(DIRNAME, in_file_name)) + with codecs.open(os.path.join(DIRNAME, out_file_name), "r", "utf-8") as f: + js_code = f.read() + os.remove(os.path.join(DIRNAME, out_file_name)) + + js_code += GET_FROM_GLOBALS_FUNC + js_code += ';var %s = getFromGlobals(%s);%s' % (py_name, repr(module_name), py_name) + print('Please wait, translating...') + py_code = translate_js(js_code) + + with open(os.path.join(PY_NODE_MODULES_PATH, module_filename), 'wb') as f: + f.write(py_code.encode('utf-8') if six.PY3 else py_code) + else: + with codecs.open(os.path.join(PY_NODE_MODULES_PATH, module_filename), "r", "utf-8") as f: + py_code = f.read() + + context = {} + exec(py_code, context) + return context['var'][py_name].to_py() + diff --git a/ext/js2py/py_node_modules/__init__.py b/ext/js2py/py_node_modules/__init__.py new file mode 100644 index 0000000000..3a6e544e40 --- /dev/null +++ b/ext/js2py/py_node_modules/__init__.py @@ -0,0 +1 @@ +"""this package contains all the npm modules translated by js2py via node import""" diff --git a/ext/js2py/pyjs.py b/ext/js2py/pyjs.py index db49f4f775..55b1d6061d 100644 --- a/ext/js2py/pyjs.py +++ b/ext/js2py/pyjs.py @@ -21,7 +21,9 @@ from .prototypes.jsjson import JSON from .host.console import console from .host.jseval import Eval -from .host.jsfunctions import parseFloat, parseInt, isFinite, isNaN +from .host.jsfunctions import parseFloat, parseInt, isFinite, \ + isNaN, escape, unescape, encodeURI, decodeURI, encodeURIComponent, decodeURIComponent + # Now we have all the necessary items to create global environment for script __all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq', @@ -39,7 +41,9 @@ 'Int32Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'ArrayBuffer', - 'parseFloat', 'parseInt', 'isFinite', 'isNaN') + 'parseFloat', 'parseInt', 'isFinite', 'isNaN', + 'escape', 'unescape', 'encodeURI', 'decodeURI', 'encodeURIComponent', 'decodeURIComponent', + ) #Array, Function, JSON, Error is done later :) # also some built in functions like eval... 
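# A minimal usage sketch for the js2py additions above (illustrative only,
# assuming the patched js2py is importable; `require` additionally assumes
# node and npm are on PATH, and 'esprima' is just an example module):
import js2py
js2py.eval_js("encodeURIComponent('a b&c')")      # -> 'a%20b%26c'
js2py.eval_js("decodeURIComponent('a%20b%26c')")  # -> 'a b&c'
esprima = js2py.require('esprima')  # translated from npm on first use, then cached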
@@ -52,6 +56,9 @@ def set_global_object(obj): # make this available obj.register('this') obj.put('this', this) + # also add window and set it to be a global object for compatibility + obj.register('window') + obj.put('window', this) diff --git a/ext/mock/__init__.py b/ext/mock/__init__.py new file mode 100644 index 0000000000..82a3110387 --- /dev/null +++ b/ext/mock/__init__.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import +import mock.mock as _mock +from mock.mock import * +__all__ = _mock.__all__ +#import mock.mock as _mock +#for name in dir(_mock): +# globals()[name] = getattr(_mock, name) diff --git a/ext/mock/mock.py b/ext/mock/mock.py new file mode 100644 index 0000000000..c674a858fc --- /dev/null +++ b/ext/mock/mock.py @@ -0,0 +1,2553 @@ +# mock.py +# Test tools for mocking and patching. +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# +# mock 1.0.1 +# http://www.voidspace.org.uk/python/mock/ +# +# Copyright (c) 2007-2013, Michael Foord & the mock team +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import + +__all__ = ( + '__version__', + 'version_info', + 'Mock', + 'MagicMock', + 'patch', + 'sentinel', + 'DEFAULT', + 'ANY', + 'call', + 'create_autospec', + 'FILTER_DIR', + 'CallableMixin', + 'NonCallableMock', + 'NonCallableMagicMock', + 'mock_open', + 'PropertyMock', +) + + +from functools import partial +import inspect +import pprint +import sys +try: + import builtins +except ImportError: + import __builtin__ as builtins +from types import ModuleType + +import six +from six import wraps +from pbr.version import VersionInfo + +_v = VersionInfo('mock').semantic_version() +__version__ = _v.release_string() +version_info = _v.version_tuple() + +import mock + +try: + inspectsignature = inspect.signature +except AttributeError: + import funcsigs + inspectsignature = funcsigs.signature + + +# TODO: use six. +try: + unicode +except NameError: + # Python 3 + basestring = unicode = str + +try: + long +except NameError: + # Python 3 + long = int + +try: + BaseException +except NameError: + # Python 2.4 compatibility + BaseException = Exception + +if six.PY2: + # Python 2's next() can't handle a non-iterator with a __next__ method. 
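+    # The shim below keeps the original builtin as _next and dispatches to
+    # obj.__next__() when the object defines it, falling back to the real
+    # builtin for ordinary iterators.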
+ _next = next + def next(obj, _next=_next): + if getattr(obj, '__next__', None): + return obj.__next__() + return _next(obj) + + del _next + + +_builtins = set(name for name in dir(builtins) if not name.startswith('_')) + +BaseExceptions = (BaseException,) +if 'java' in sys.platform: + # jython + import java + BaseExceptions = (BaseException, java.lang.Throwable) + +try: + _isidentifier = str.isidentifier +except AttributeError: + # Python 2.X + import keyword + import re + regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) + def _isidentifier(string): + if string in keyword.kwlist: + return False + return regex.match(string) + +self = 'im_self' +builtin = '__builtin__' +if six.PY3: + self = '__self__' + builtin = 'builtins' + +# NOTE: This FILTER_DIR is not used. The binding in mock.FILTER_DIR is. +FILTER_DIR = True + +# Workaround for Python issue #12370 +# Without this, the __class__ properties wouldn't be set correctly +_safe_super = super + +def _is_instance_mock(obj): + # can't use isinstance on Mock objects because they override __class__ + # The base class for all mocks is NonCallableMock + return issubclass(type(obj), NonCallableMock) + + +def _is_exception(obj): + return ( + isinstance(obj, BaseExceptions) or + isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions) + ) + + +class _slotted(object): + __slots__ = ['a'] + + +DescriptorTypes = ( + type(_slotted.a), + property, +) + + +def _get_signature_object(func, as_instance, eat_self): + """ + Given an arbitrary, possibly callable object, try to create a suitable + signature object. + Return a (reduced func, signature) tuple, or None. + """ + if isinstance(func, ClassTypes) and not as_instance: + # If it's a type and should be modelled as a type, use __init__. + try: + func = func.__init__ + except AttributeError: + return None + # Skip the `self` argument in __init__ + eat_self = True + elif not isinstance(func, FunctionTypes): + # If we really want to model an instance of the passed type, + # __call__ should be looked up, not __init__. + try: + func = func.__call__ + except AttributeError: + return None + if eat_self: + sig_func = partial(func, None) + else: + sig_func = func + + try: + return func, inspectsignature(sig_func) + except ValueError: + # Certain callable types are not supported by inspect.signature() + return None + + +def _check_signature(func, mock, skipfirst, instance=False): + sig = _get_signature_object(func, instance, skipfirst) + if sig is None: + return + func, sig = sig + def checksig(_mock_self, *args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + type(mock)._mock_check_sig = checksig + + +def _copy_func_details(func, funcopy): + funcopy.__name__ = func.__name__ + funcopy.__doc__ = func.__doc__ + try: + funcopy.__text_signature__ = func.__text_signature__ + except AttributeError: + pass + # we explicitly don't copy func.__dict__ into this copy as it would + # expose original attributes that should be mocked + try: + funcopy.__module__ = func.__module__ + except AttributeError: + pass + try: + funcopy.__defaults__ = func.__defaults__ + except AttributeError: + pass + try: + funcopy.__kwdefaults__ = func.__kwdefaults__ + except AttributeError: + pass + if six.PY2: + funcopy.func_defaults = func.func_defaults + return + + +def _callable(obj): + if isinstance(obj, ClassTypes): + return True + if getattr(obj, '__call__', None) is not None: + return True + return False + + +def _is_list(obj): + # checks for list or tuples + # XXXX badly named! 
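+    # the exact type is compared, so list/tuple subclasses do not match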
+ return type(obj) in (list, tuple) + + +def _instance_callable(obj): + """Given an object, return True if the object is callable. + For classes, return True if instances would be callable.""" + if not isinstance(obj, ClassTypes): + # already an instance + return getattr(obj, '__call__', None) is not None + + if six.PY3: + # *could* be broken by a class overriding __mro__ or __dict__ via + # a metaclass + for base in (obj,) + obj.__mro__: + if base.__dict__.get('__call__') is not None: + return True + else: + klass = obj + # uses __bases__ instead of __mro__ so that we work with old style classes + if klass.__dict__.get('__call__') is not None: + return True + + for base in klass.__bases__: + if _instance_callable(base): + return True + return False + + +def _set_signature(mock, original, instance=False): + # creates a function with signature (*args, **kwargs) that delegates to a + # mock. It still does signature checking by calling a lambda with the same + # signature as the original. + if not _callable(original): + return + + skipfirst = isinstance(original, ClassTypes) + result = _get_signature_object(original, instance, skipfirst) + if result is None: + return + func, sig = result + def checksig(*args, **kwargs): + sig.bind(*args, **kwargs) + _copy_func_details(func, checksig) + + name = original.__name__ + if not _isidentifier(name): + name = 'funcopy' + context = {'_checksig_': checksig, 'mock': mock} + src = """def %s(*args, **kwargs): + _checksig_(*args, **kwargs) + return mock(*args, **kwargs)""" % name + six.exec_(src, context) + funcopy = context[name] + _setup_func(funcopy, mock) + return funcopy + + +def _setup_func(funcopy, mock): + funcopy.mock = mock + + # can't use isinstance with mocks + if not _is_instance_mock(mock): + return + + def assert_called_with(*args, **kwargs): + return mock.assert_called_with(*args, **kwargs) + def assert_called_once_with(*args, **kwargs): + return mock.assert_called_once_with(*args, **kwargs) + def assert_has_calls(*args, **kwargs): + return mock.assert_has_calls(*args, **kwargs) + def assert_any_call(*args, **kwargs): + return mock.assert_any_call(*args, **kwargs) + def reset_mock(): + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + mock.reset_mock() + ret = funcopy.return_value + if _is_instance_mock(ret) and not ret is mock: + ret.reset_mock() + + funcopy.called = False + funcopy.call_count = 0 + funcopy.call_args = None + funcopy.call_args_list = _CallList() + funcopy.method_calls = _CallList() + funcopy.mock_calls = _CallList() + + funcopy.return_value = mock.return_value + funcopy.side_effect = mock.side_effect + funcopy._mock_children = mock._mock_children + + funcopy.assert_called_with = assert_called_with + funcopy.assert_called_once_with = assert_called_once_with + funcopy.assert_has_calls = assert_has_calls + funcopy.assert_any_call = assert_any_call + funcopy.reset_mock = reset_mock + + mock._mock_delegate = funcopy + + +def _is_magic(name): + return '__%s__' % name[2:-2] == name + + +class _SentinelObject(object): + "A unique, named, sentinel object." 
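+    # Instances are handed out lazily by the module-level `sentinel` factory
+    # defined below (e.g. sentinel.DEFAULT, sentinel.MISSING).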
+ def __init__(self, name): + self.name = name + + def __repr__(self): + return 'sentinel.%s' % self.name + + +class _Sentinel(object): + """Access attributes to return a named object, usable as a sentinel.""" + def __init__(self): + self._sentinels = {} + + def __getattr__(self, name): + if name == '__bases__': + # Without this help(unittest.mock) raises an exception + raise AttributeError + return self._sentinels.setdefault(name, _SentinelObject(name)) + + +sentinel = _Sentinel() + +DEFAULT = sentinel.DEFAULT +_missing = sentinel.MISSING +_deleted = sentinel.DELETED + + +class OldStyleClass: + pass +ClassType = type(OldStyleClass) + + +def _copy(value): + if type(value) in (dict, list, tuple, set): + return type(value)(value) + return value + + +ClassTypes = (type,) +if six.PY2: + ClassTypes = (type, ClassType) + +_allowed_names = set(( + 'return_value', '_mock_return_value', 'side_effect', + '_mock_side_effect', '_mock_parent', '_mock_new_parent', + '_mock_name', '_mock_new_name' +)) + + +def _delegating_property(name): + _allowed_names.add(name) + _the_name = '_mock_' + name + def _get(self, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + return getattr(self, _the_name) + return getattr(sig, name) + def _set(self, value, name=name, _the_name=_the_name): + sig = self._mock_delegate + if sig is None: + self.__dict__[_the_name] = value + else: + setattr(sig, name, value) + + return property(_get, _set) + + + +class _CallList(list): + + def __contains__(self, value): + if not isinstance(value, list): + return list.__contains__(self, value) + len_value = len(value) + len_self = len(self) + if len_value > len_self: + return False + + for i in range(0, len_self - len_value + 1): + sub_list = self[i:i+len_value] + if sub_list == value: + return True + return False + + def __repr__(self): + return pprint.pformat(list(self)) + + +def _check_and_set_parent(parent, value, name, new_name): + if not _is_instance_mock(value): + return False + if ((value._mock_name or value._mock_new_name) or + (value._mock_parent is not None) or + (value._mock_new_parent is not None)): + return False + + _parent = parent + while _parent is not None: + # setting a mock (value) as a child or return value of itself + # should not modify the mock + if _parent is value: + return False + _parent = _parent._mock_new_parent + + if new_name: + value._mock_new_parent = parent + value._mock_new_name = new_name + if name: + value._mock_parent = parent + value._mock_name = name + return True + +# Internal class to identify if we wrapped an iterator object or not. 
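+# Non-callable, non-exception iterables assigned to `side_effect` are wrapped
+# in _MockIter (see __get_side_effect below) so that successive calls to the
+# mock advance a single shared iterator.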
+class _MockIter(object): + def __init__(self, obj): + self.obj = iter(obj) + def __iter__(self): + return self + def __next__(self): + return next(self.obj) + +class Base(object): + _mock_return_value = DEFAULT + _mock_side_effect = None + def __init__(self, *args, **kwargs): + pass + + + +class NonCallableMock(Base): + """A non-callable version of `Mock`""" + + def __new__(cls, *args, **kw): + # every instance has its own class + # so we can create magic methods on the + # class without stomping on other mocks + new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__}) + instance = object.__new__(new) + return instance + + + def __init__( + self, spec=None, wraps=None, name=None, spec_set=None, + parent=None, _spec_state=None, _new_name='', _new_parent=None, + _spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs + ): + if _new_parent is None: + _new_parent = parent + + __dict__ = self.__dict__ + __dict__['_mock_parent'] = parent + __dict__['_mock_name'] = name + __dict__['_mock_new_name'] = _new_name + __dict__['_mock_new_parent'] = _new_parent + + if spec_set is not None: + spec = spec_set + spec_set = True + if _eat_self is None: + _eat_self = parent is not None + + self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self) + + __dict__['_mock_children'] = {} + __dict__['_mock_wraps'] = wraps + __dict__['_mock_delegate'] = None + + __dict__['_mock_called'] = False + __dict__['_mock_call_args'] = None + __dict__['_mock_call_count'] = 0 + __dict__['_mock_call_args_list'] = _CallList() + __dict__['_mock_mock_calls'] = _CallList() + + __dict__['method_calls'] = _CallList() + __dict__['_mock_unsafe'] = unsafe + + if kwargs: + self.configure_mock(**kwargs) + + _safe_super(NonCallableMock, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state + ) + + + def attach_mock(self, mock, attribute): + """ + Attach a mock as an attribute of this one, replacing its name and + parent. Calls to the attached mock will be recorded in the + `method_calls` and `mock_calls` attributes of this one.""" + mock._mock_parent = None + mock._mock_new_parent = None + mock._mock_name = '' + mock._mock_new_name = None + + setattr(self, attribute, mock) + + + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. 
+ + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + + + def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False, + _eat_self=False): + _spec_class = None + _spec_signature = None + + if spec is not None and not _is_list(spec): + if isinstance(spec, ClassTypes): + _spec_class = spec + else: + _spec_class = _get_class(spec) + res = _get_signature_object(spec, + _spec_as_instance, _eat_self) + _spec_signature = res and res[1] + + spec = dir(spec) + + __dict__ = self.__dict__ + __dict__['_spec_class'] = _spec_class + __dict__['_spec_set'] = spec_set + __dict__['_spec_signature'] = _spec_signature + __dict__['_mock_methods'] = spec + + + def __get_return_value(self): + ret = self._mock_return_value + if self._mock_delegate is not None: + ret = self._mock_delegate.return_value + + if ret is DEFAULT: + ret = self._get_child_mock( + _new_parent=self, _new_name='()' + ) + self.return_value = ret + return ret + + + def __set_return_value(self, value): + if self._mock_delegate is not None: + self._mock_delegate.return_value = value + else: + self._mock_return_value = value + _check_and_set_parent(self, value, None, '()') + + __return_value_doc = "The value to be returned when the mock is called." + return_value = property(__get_return_value, __set_return_value, + __return_value_doc) + + + @property + def __class__(self): + if self._spec_class is None: + return type(self) + return self._spec_class + + called = _delegating_property('called') + call_count = _delegating_property('call_count') + call_args = _delegating_property('call_args') + call_args_list = _delegating_property('call_args_list') + mock_calls = _delegating_property('mock_calls') + + + def __get_side_effect(self): + delegated = self._mock_delegate + if delegated is None: + return self._mock_side_effect + sf = delegated.side_effect + if (sf is not None and not callable(sf) + and not isinstance(sf, _MockIter) and not _is_exception(sf)): + sf = _MockIter(sf) + delegated.side_effect = sf + return sf + + def __set_side_effect(self, value): + value = _try_iter(value) + delegated = self._mock_delegate + if delegated is None: + self._mock_side_effect = value + else: + delegated.side_effect = value + + side_effect = property(__get_side_effect, __set_side_effect) + + + def reset_mock(self, visited=None): + "Restore the mock object to its initial state." + if visited is None: + visited = [] + if id(self) in visited: + return + visited.append(id(self)) + + self.called = False + self.call_args = None + self.call_count = 0 + self.mock_calls = _CallList() + self.call_args_list = _CallList() + self.method_calls = _CallList() + + for child in self._mock_children.values(): + if isinstance(child, _SpecState): + continue + child.reset_mock(visited) + + ret = self._mock_return_value + if _is_instance_mock(ret) and ret is not self: + ret.reset_mock(visited) + + + def configure_mock(self, **kwargs): + """Set attributes on the mock through keyword arguments. 
+ + Attributes plus return values and side effects can be set on child + mocks using standard dot notation and unpacking a dictionary in the + method call: + + >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError} + >>> mock.configure_mock(**attrs)""" + for arg, val in sorted(kwargs.items(), + # we sort on the number of dots so that + # attributes are set before we set attributes on + # attributes + key=lambda entry: entry[0].count('.')): + args = arg.split('.') + final = args.pop() + obj = self + for entry in args: + obj = getattr(obj, entry) + setattr(obj, final, val) + + + def __getattr__(self, name): + if name in ('_mock_methods', '_mock_unsafe'): + raise AttributeError(name) + elif self._mock_methods is not None: + if name not in self._mock_methods or name in _all_magics: + raise AttributeError("Mock object has no attribute %r" % name) + elif _is_magic(name): + raise AttributeError(name) + if not self._mock_unsafe: + if name.startswith(('assert', 'assret')): + raise AttributeError(name) + + result = self._mock_children.get(name) + if result is _deleted: + raise AttributeError(name) + elif result is None: + wraps = None + if self._mock_wraps is not None: + # XXXX should we get the attribute without triggering code + # execution? + wraps = getattr(self._mock_wraps, name) + + result = self._get_child_mock( + parent=self, name=name, wraps=wraps, _new_name=name, + _new_parent=self + ) + self._mock_children[name] = result + + elif isinstance(result, _SpecState): + result = create_autospec( + result.spec, result.spec_set, result.instance, + result.parent, result.name + ) + self._mock_children[name] = result + + return result + + + def __repr__(self): + _name_list = [self._mock_new_name] + _parent = self._mock_new_parent + last = self + + dot = '.' + if _name_list == ['()']: + dot = '' + seen = set() + while _parent is not None: + last = _parent + + _name_list.append(_parent._mock_new_name + dot) + dot = '.' + if _parent._mock_new_name == '()': + dot = '' + + _parent = _parent._mock_new_parent + + # use ids here so as not to call __hash__ on the mocks + if id(_parent) in seen: + break + seen.add(id(_parent)) + + _name_list = list(reversed(_name_list)) + _first = last._mock_name or 'mock' + if len(_name_list) > 1: + if _name_list[1] not in ('()', '().'): + _first += '.' 
+ _name_list[0] = _first + name = ''.join(_name_list) + + name_string = '' + if name not in ('mock', 'mock.'): + name_string = ' name=%r' % name + + spec_string = '' + if self._spec_class is not None: + spec_string = ' spec=%r' + if self._spec_set: + spec_string = ' spec_set=%r' + spec_string = spec_string % self._spec_class.__name__ + return "<%s%s%s id='%s'>" % ( + type(self).__name__, + name_string, + spec_string, + id(self) + ) + + + def __dir__(self): + """Filter the output of `dir(mock)` to only useful members.""" + if not mock.FILTER_DIR and getattr(object, '__dir__', None): + # object.__dir__ is not in 2.7 + return object.__dir__(self) + + extras = self._mock_methods or [] + from_type = dir(type(self)) + from_dict = list(self.__dict__) + + if mock.FILTER_DIR: + # object.__dir__ is not in 2.7 + from_type = [e for e in from_type if not e.startswith('_')] + from_dict = [e for e in from_dict if not e.startswith('_') or + _is_magic(e)] + return sorted(set(extras + from_type + from_dict + + list(self._mock_children))) + + + def __setattr__(self, name, value): + if name in _allowed_names: + # property setters go through here + return object.__setattr__(self, name, value) + elif (self._spec_set and self._mock_methods is not None and + name not in self._mock_methods and + name not in self.__dict__): + raise AttributeError("Mock object has no attribute '%s'" % name) + elif name in _unsupported_magics: + msg = 'Attempting to set unsupported magic method %r.' % name + raise AttributeError(msg) + elif name in _all_magics: + if self._mock_methods is not None and name not in self._mock_methods: + raise AttributeError("Mock object has no attribute '%s'" % name) + + if not _is_instance_mock(value): + setattr(type(self), name, _get_method(name, value)) + original = value + value = lambda *args, **kw: original(self, *args, **kw) + else: + # only set _new_name and not name so that mock_calls is tracked + # but not method calls + _check_and_set_parent(self, value, None, name) + setattr(type(self), name, value) + self._mock_children[name] = value + elif name == '__class__': + self._spec_class = value + return + else: + if _check_and_set_parent(self, value, name, name): + self._mock_children[name] = value + return object.__setattr__(self, name, value) + + + def __delattr__(self, name): + if name in _all_magics and name in type(self).__dict__: + delattr(type(self), name) + if name not in self.__dict__: + # for magic methods that are still MagicProxy objects and + # not set on the instance itself + return + + if name in self.__dict__: + object.__delattr__(self, name) + + obj = self._mock_children.get(name, _missing) + if obj is _deleted: + raise AttributeError(name) + if obj is not _missing: + del self._mock_children[name] + self._mock_children[name] = _deleted + + + def _format_mock_call_signature(self, args, kwargs): + name = self._mock_name or 'mock' + return _format_call_signature(name, args, kwargs) + + + def _format_mock_failure_message(self, args, kwargs): + message = 'Expected call: %s\nActual call: %s' + expected_string = self._format_mock_call_signature(args, kwargs) + call_args = self.call_args + if len(call_args) == 3: + call_args = call_args[1:] + actual_string = self._format_mock_call_signature(*call_args) + return message % (expected_string, actual_string) + + + def _call_matcher(self, _call): + """ + Given a call (or simply a (args, kwargs) tuple), return a + comparison key suitable for matching with other calls. 
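One visible consequence of this matching: when the mock carries a spec signature, positional and keyword spellings of the same call compare equal, because both are normalised through `Signature.bind`. A sketch (the `greet` function is invented):

from mock import create_autospec

def greet(name, punctuation='!'):       # invented function under test
    return name + punctuation

m = create_autospec(greet)
m('bob', punctuation='?')
# Both spellings bind to the same arguments, so this passes:
m.assert_called_with('bob', '?')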
+        This is a best effort method which relies on the spec's signature,
+        if available, or falls back on the arguments themselves.
+        """
+        sig = self._spec_signature
+        if sig is not None:
+            if len(_call) == 2:
+                name = ''
+                args, kwargs = _call
+            else:
+                name, args, kwargs = _call
+            try:
+                return name, sig.bind(*args, **kwargs)
+            except TypeError as e:
+                e.__traceback__ = None
+                return e
+        else:
+            return _call
+
+    def assert_not_called(_mock_self):
+        """assert that the mock was never called.
+        """
+        self = _mock_self
+        if self.call_count != 0:
+            msg = ("Expected '%s' to not have been called. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
+
+    def assert_called(_mock_self):
+        """assert that the mock was called at least once
+        """
+        self = _mock_self
+        if self.call_count == 0:
+            msg = ("Expected '%s' to have been called." %
+                   (self._mock_name or 'mock'))
+            raise AssertionError(msg)
+
+    def assert_called_once(_mock_self):
+        """assert that the mock was called only once.
+        """
+        self = _mock_self
+        if not self.call_count == 1:
+            msg = ("Expected '%s' to have been called once. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
+
+    def assert_called_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called with the specified arguments.
+
+        Raises an AssertionError if the args and keyword args passed in are
+        different to the last call to the mock."""
+        self = _mock_self
+        if self.call_args is None:
+            expected = self._format_mock_call_signature(args, kwargs)
+            raise AssertionError('Expected call: %s\nNot called' % (expected,))
+
+        def _error_message(cause):
+            msg = self._format_mock_failure_message(args, kwargs)
+            if six.PY2 and cause is not None:
+                # Tack on some diagnostics for Python without __cause__
+                msg = '%s\n%s' % (msg, str(cause))
+            return msg
+        expected = self._call_matcher((args, kwargs))
+        actual = self._call_matcher(self.call_args)
+        if expected != actual:
+            cause = expected if isinstance(expected, Exception) else None
+            six.raise_from(AssertionError(_error_message(cause)), cause)
+
+
+    def assert_called_once_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called exactly once and with the specified
+        arguments."""
+        self = _mock_self
+        if not self.call_count == 1:
+            msg = ("Expected '%s' to be called once. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
+        return self.assert_called_with(*args, **kwargs)
+
+
+    def assert_has_calls(self, calls, any_order=False):
+        """assert the mock has been called with the specified calls.
+        The `mock_calls` list is checked for the calls.
+
+        If `any_order` is False (the default) then the calls must be
+        sequential. There can be extra calls before or after the
+        specified calls.
+ + If `any_order` is True then the calls can be in any order, but + they must all appear in `mock_calls`.""" + expected = [self._call_matcher(c) for c in calls] + cause = expected if isinstance(expected, Exception) else None + all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls) + if not any_order: + if expected not in all_calls: + six.raise_from(AssertionError( + 'Calls not found.\nExpected: %r\n' + 'Actual: %r' % (_CallList(calls), self.mock_calls) + ), cause) + return + + all_calls = list(all_calls) + + not_found = [] + for kall in expected: + try: + all_calls.remove(kall) + except ValueError: + not_found.append(kall) + if not_found: + six.raise_from(AssertionError( + '%r not all found in call list' % (tuple(not_found),) + ), cause) + + + def assert_any_call(self, *args, **kwargs): + """assert the mock has been called with the specified arguments. + + The assert passes if the mock has *ever* been called, unlike + `assert_called_with` and `assert_called_once_with` that only pass if + the call is the most recent one.""" + expected = self._call_matcher((args, kwargs)) + actual = [self._call_matcher(c) for c in self.call_args_list] + if expected not in actual: + cause = expected if isinstance(expected, Exception) else None + expected_string = self._format_mock_call_signature(args, kwargs) + six.raise_from(AssertionError( + '%s call not found' % expected_string + ), cause) + + + def _get_child_mock(self, **kw): + """Create the child mocks for attributes and return value. + By default child mocks will be the same type as the parent. + Subclasses of Mock may want to override this to customize the way + child mocks are made. + + For non-callable mocks the callable variant will be used (rather than + any custom subclass).""" + _type = type(self) + if not issubclass(_type, CallableMixin): + if issubclass(_type, NonCallableMagicMock): + klass = MagicMock + elif issubclass(_type, NonCallableMock) : + klass = Mock + else: + klass = _type.__mro__[1] + return klass(**kw) + + + +def _try_iter(obj): + if obj is None: + return obj + if _is_exception(obj): + return obj + if _callable(obj): + return obj + try: + return iter(obj) + except TypeError: + # XXXX backwards compatibility + # but this will blow up on first call - so maybe we should fail early? 
+ return obj + + + +class CallableMixin(Base): + + def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, + wraps=None, name=None, spec_set=None, parent=None, + _spec_state=None, _new_name='', _new_parent=None, **kwargs): + self.__dict__['_mock_return_value'] = return_value + + _safe_super(CallableMixin, self).__init__( + spec, wraps, name, spec_set, parent, + _spec_state, _new_name, _new_parent, **kwargs + ) + + self.side_effect = side_effect + + + def _mock_check_sig(self, *args, **kwargs): + # stub method that can be replaced with one with a specific signature + pass + + + def __call__(_mock_self, *args, **kwargs): + # can't use self in-case a function / method we are mocking uses self + # in the signature + _mock_self._mock_check_sig(*args, **kwargs) + return _mock_self._mock_call(*args, **kwargs) + + + def _mock_call(_mock_self, *args, **kwargs): + self = _mock_self + self.called = True + self.call_count += 1 + _new_name = self._mock_new_name + _new_parent = self._mock_new_parent + + _call = _Call((args, kwargs), two=True) + self.call_args = _call + self.call_args_list.append(_call) + self.mock_calls.append(_Call(('', args, kwargs))) + + seen = set() + skip_next_dot = _new_name == '()' + do_method_calls = self._mock_parent is not None + name = self._mock_name + while _new_parent is not None: + this_mock_call = _Call((_new_name, args, kwargs)) + if _new_parent._mock_new_name: + dot = '.' + if skip_next_dot: + dot = '' + + skip_next_dot = False + if _new_parent._mock_new_name == '()': + skip_next_dot = True + + _new_name = _new_parent._mock_new_name + dot + _new_name + + if do_method_calls: + if _new_name == name: + this_method_call = this_mock_call + else: + this_method_call = _Call((name, args, kwargs)) + _new_parent.method_calls.append(this_method_call) + + do_method_calls = _new_parent._mock_parent is not None + if do_method_calls: + name = _new_parent._mock_name + '.' + name + + _new_parent.mock_calls.append(this_mock_call) + _new_parent = _new_parent._mock_new_parent + + # use ids here so as not to call __hash__ on the mocks + _new_parent_id = id(_new_parent) + if _new_parent_id in seen: + break + seen.add(_new_parent_id) + + ret_val = DEFAULT + effect = self.side_effect + if effect is not None: + if _is_exception(effect): + raise effect + + if not _callable(effect): + result = next(effect) + if _is_exception(result): + raise result + if result is DEFAULT: + result = self.return_value + return result + + ret_val = effect(*args, **kwargs) + + if (self._mock_wraps is not None and + self._mock_return_value is DEFAULT): + return self._mock_wraps(*args, **kwargs) + if ret_val is DEFAULT: + ret_val = self.return_value + return ret_val + + + +class Mock(CallableMixin, NonCallableMock): + """ + Create a new `Mock` object. `Mock` takes several optional arguments + that specify the behaviour of the Mock object: + + * `spec`: This can be either a list of strings or an existing object (a + class or instance) that acts as the specification for the mock object. If + you pass in an object then a list of strings is formed by calling dir on + the object (excluding unsupported magic attributes and methods). Accessing + any attribute not in this list will raise an `AttributeError`. + + If `spec` is an object (rather than a list of strings) then + `mock.__class__` returns the class of the spec object. This allows mocks + to pass `isinstance` tests. + + * `spec_set`: A stricter variant of `spec`. 
If used, attempting to *set* + or get an attribute on the mock that isn't on the object passed as + `spec_set` will raise an `AttributeError`. + + * `side_effect`: A function to be called whenever the Mock is called. See + the `side_effect` attribute. Useful for raising exceptions or + dynamically changing return values. The function is called with the same + arguments as the mock, and unless it returns `DEFAULT`, the return + value of this function is used as the return value. + + Alternatively `side_effect` can be an exception class or instance. In + this case the exception will be raised when the mock is called. + + If `side_effect` is an iterable then each call to the mock will return + the next value from the iterable. If any of the members of the iterable + are exceptions they will be raised instead of returned. + + * `return_value`: The value returned when the mock is called. By default + this is a new Mock (created on first access). See the + `return_value` attribute. + + * `wraps`: Item for the mock object to wrap. If `wraps` is not None then + calling the Mock will pass the call through to the wrapped object + (returning the real result). Attribute access on the mock will return a + Mock object that wraps the corresponding attribute of the wrapped object + (so attempting to access an attribute that doesn't exist will raise an + `AttributeError`). + + If the mock has an explicit `return_value` set then calls are not passed + to the wrapped object and the `return_value` is returned instead. + + * `name`: If the mock has a name then it will be used in the repr of the + mock. This can be useful for debugging. The name is propagated to child + mocks. + + Mocks can also be called with arbitrary keyword arguments. These will be + used to set attributes on the mock after it is created. 
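A compact sketch pulling those options together (every name below is invented):

from mock import Mock, DEFAULT

m = Mock(side_effect=[1, 2, KeyError('boom')])
assert m() == 1 and m() == 2            # an iterable side_effect yields in order
try:
    m()                                 # exception members are raised, not returned
except KeyError:
    pass

def passthrough(x):
    return DEFAULT                      # DEFAULT defers to return_value

m = Mock(side_effect=passthrough, return_value='fallback')
assert m(42) == 'fallback'

m = Mock(name='db', some_attribute=7)   # extra keyword arguments become attributes
assert m.some_attribute == 7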
+ """ + + + +def _dot_lookup(thing, comp, import_path): + try: + return getattr(thing, comp) + except AttributeError: + __import__(import_path) + return getattr(thing, comp) + + +def _importer(target): + components = target.split('.') + import_path = components.pop(0) + thing = __import__(import_path) + + for comp in components: + import_path += ".%s" % comp + thing = _dot_lookup(thing, comp, import_path) + return thing + + +def _is_started(patcher): + # XXXX horrible + return hasattr(patcher, 'is_local') + + +class _patch(object): + + attribute_name = None + _active_patches = [] + + def __init__( + self, getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs + ): + if new_callable is not None: + if new is not DEFAULT: + raise ValueError( + "Cannot use 'new' and 'new_callable' together" + ) + if autospec is not None: + raise ValueError( + "Cannot use 'autospec' and 'new_callable' together" + ) + + self.getter = getter + self.attribute = attribute + self.new = new + self.new_callable = new_callable + self.spec = spec + self.create = create + self.has_local = False + self.spec_set = spec_set + self.autospec = autospec + self.kwargs = kwargs + self.additional_patchers = [] + + + def copy(self): + patcher = _patch( + self.getter, self.attribute, self.new, self.spec, + self.create, self.spec_set, + self.autospec, self.new_callable, self.kwargs + ) + patcher.attribute_name = self.attribute_name + patcher.additional_patchers = [ + p.copy() for p in self.additional_patchers + ] + return patcher + + + def __call__(self, func): + if isinstance(func, ClassTypes): + return self.decorate_class(func) + return self.decorate_callable(func) + + + def decorate_class(self, klass): + for attr in dir(klass): + if not attr.startswith(patch.TEST_PREFIX): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + patcher = self.copy() + setattr(klass, attr, patcher(attr_value)) + return klass + + + def decorate_callable(self, func): + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + @wraps(func) + def patched(*args, **keywargs): + extra_args = [] + entered_patchers = [] + + exc_info = tuple() + try: + for patching in patched.patchings: + arg = patching.__enter__() + entered_patchers.append(patching) + if patching.attribute_name is not None: + keywargs.update(arg) + elif patching.new is DEFAULT: + extra_args.append(arg) + + args += tuple(extra_args) + return func(*args, **keywargs) + except: + if (patching not in entered_patchers and + _is_started(patching)): + # the patcher may have been started, but an exception + # raised whilst entering one of its additional_patchers + entered_patchers.append(patching) + # Pass the exception to __exit__ + exc_info = sys.exc_info() + # re-raise the exception + raise + finally: + for patching in reversed(entered_patchers): + patching.__exit__(*exc_info) + + patched.patchings = [self] + return patched + + + def get_original(self): + target = self.getter() + name = self.attribute + + original = DEFAULT + local = False + + try: + original = target.__dict__[name] + except (AttributeError, KeyError): + original = getattr(target, name, DEFAULT) + else: + local = True + + if name in _builtins and isinstance(target, ModuleType): + self.create = True + + if not self.create and original is DEFAULT: + raise AttributeError( + "%s does not have the attribute %r" % (target, name) + ) + return original, local + + + def __enter__(self): + """Perform the patch.""" + new, spec, spec_set = 
self.new, self.spec, self.spec_set + autospec, kwargs = self.autospec, self.kwargs + new_callable = self.new_callable + self.target = self.getter() + + # normalise False to None + if spec is False: + spec = None + if spec_set is False: + spec_set = None + if autospec is False: + autospec = None + + if spec is not None and autospec is not None: + raise TypeError("Can't specify spec and autospec") + if ((spec is not None or autospec is not None) and + spec_set not in (True, None)): + raise TypeError("Can't provide explicit spec_set *and* spec or autospec") + + original, local = self.get_original() + + if new is DEFAULT and autospec is None: + inherit = False + if spec is True: + # set spec to the object we are replacing + spec = original + if spec_set is True: + spec_set = original + spec = None + elif spec is not None: + if spec_set is True: + spec_set = spec + spec = None + elif spec_set is True: + spec_set = original + + if spec is not None or spec_set is not None: + if original is DEFAULT: + raise TypeError("Can't use 'spec' with create=True") + if isinstance(original, ClassTypes): + # If we're patching out a class and there is a spec + inherit = True + + Klass = MagicMock + _kwargs = {} + if new_callable is not None: + Klass = new_callable + elif spec is not None or spec_set is not None: + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if _is_list(this_spec): + not_callable = '__call__' not in this_spec + else: + not_callable = not _callable(this_spec) + if not_callable: + Klass = NonCallableMagicMock + + if spec is not None: + _kwargs['spec'] = spec + if spec_set is not None: + _kwargs['spec_set'] = spec_set + + # add a name to mocks + if (isinstance(Klass, type) and + issubclass(Klass, NonCallableMock) and self.attribute): + _kwargs['name'] = self.attribute + + _kwargs.update(kwargs) + new = Klass(**_kwargs) + + if inherit and _is_instance_mock(new): + # we can only tell if the instance should be callable if the + # spec is not a list + this_spec = spec + if spec_set is not None: + this_spec = spec_set + if (not _is_list(this_spec) and not + _instance_callable(this_spec)): + Klass = NonCallableMagicMock + + _kwargs.pop('name') + new.return_value = Klass(_new_parent=new, _new_name='()', + **_kwargs) + elif autospec is not None: + # spec is ignored, new *must* be default, spec_set is treated + # as a boolean. Should we check spec is not None and that spec_set + # is a bool? + if new is not DEFAULT: + raise TypeError( + "autospec creates the mock for you. Can't specify " + "autospec and new." 
+ ) + if original is DEFAULT: + raise TypeError("Can't use 'autospec' with create=True") + spec_set = bool(spec_set) + if autospec is True: + autospec = original + + new = create_autospec(autospec, spec_set=spec_set, + _name=self.attribute, **kwargs) + elif kwargs: + # can't set keyword args when we aren't creating the mock + # XXXX If new is a Mock we could call new.configure_mock(**kwargs) + raise TypeError("Can't pass kwargs to a mock we aren't creating") + + new_attr = new + + self.temp_original = original + self.is_local = local + setattr(self.target, self.attribute, new_attr) + if self.attribute_name is not None: + extra_args = {} + if self.new is DEFAULT: + extra_args[self.attribute_name] = new + for patching in self.additional_patchers: + arg = patching.__enter__() + if patching.new is DEFAULT: + extra_args.update(arg) + return extra_args + + return new + + + def __exit__(self, *exc_info): + """Undo the patch.""" + if not _is_started(self): + raise RuntimeError('stop called on unstarted patcher') + + if self.is_local and self.temp_original is not DEFAULT: + setattr(self.target, self.attribute, self.temp_original) + else: + delattr(self.target, self.attribute) + if not self.create and (not hasattr(self.target, self.attribute) or + self.attribute in ('__doc__', '__module__', + '__defaults__', '__annotations__', + '__kwdefaults__')): + # needed for proxy objects like django settings + setattr(self.target, self.attribute, self.temp_original) + + del self.temp_original + del self.is_local + del self.target + for patcher in reversed(self.additional_patchers): + if _is_started(patcher): + patcher.__exit__(*exc_info) + + + def start(self): + """Activate a patch, returning any created mock.""" + result = self.__enter__() + self._active_patches.append(self) + return result + + + def stop(self): + """Stop an active patch.""" + try: + self._active_patches.remove(self) + except ValueError: + # If the patch hasn't been started this will fail + pass + + return self.__exit__() + + + +def _get_target(target): + try: + target, attribute = target.rsplit('.', 1) + except (TypeError, ValueError): + raise TypeError("Need a valid target to patch. You supplied: %r" % + (target,)) + getter = lambda: _importer(target) + return getter, attribute + + +def _patch_object( + target, attribute, new=DEFAULT, spec=None, + create=False, spec_set=None, autospec=None, + new_callable=None, **kwargs + ): + """ + patch the named member (`attribute`) on an object (`target`) with a mock + object. + + `patch.object` can be used as a decorator, class decorator or a context + manager. Arguments `new`, `spec`, `create`, `spec_set`, + `autospec` and `new_callable` have the same meaning as for `patch`. Like + `patch`, `patch.object` takes arbitrary keyword arguments for configuring + the mock object it creates. + + When used as a class decorator `patch.object` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + getter = lambda: target + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs + ) + + +def _patch_multiple(target, spec=None, create=False, spec_set=None, + autospec=None, new_callable=None, **kwargs): + """Perform multiple patches in a single call. It takes the object to be + patched (either as an object or a string to fetch the object by importing) + and keyword arguments for the patches:: + + with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'): + ... + + Use `DEFAULT` as the value if you want `patch.multiple` to create + mocks for you. 
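A sketch of both shapes, patching an invented `Settings` class passed directly rather than by import string:

from mock import patch, DEFAULT, MagicMock

class Settings(object):                 # invented stand-in target
    FIRST = 'one'
    SECOND = 'two'

with patch.multiple(Settings, FIRST=DEFAULT, SECOND='patched') as values:
    assert isinstance(Settings.FIRST, MagicMock)    # created for us via DEFAULT
    assert values == {'FIRST': Settings.FIRST}      # only DEFAULT entries are returned
    assert Settings.SECOND == 'patched'
assert Settings.FIRST == 'one'          # originals restored on exit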
In this case the created mocks are passed into a decorated + function by keyword, and a dictionary is returned when `patch.multiple` is + used as a context manager. + + `patch.multiple` can be used as a decorator, class decorator or a context + manager. The arguments `spec`, `spec_set`, `create`, + `autospec` and `new_callable` have the same meaning as for `patch`. These + arguments will be applied to *all* patches done by `patch.multiple`. + + When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX` + for choosing which methods to wrap. + """ + if type(target) in (unicode, str): + getter = lambda: _importer(target) + else: + getter = lambda: target + + if not kwargs: + raise ValueError( + 'Must supply at least one keyword argument with patch.multiple' + ) + # need to wrap in a list for python 3, where items is a view + items = list(kwargs.items()) + attribute, new = items[0] + patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + patcher.attribute_name = attribute + for attribute, new in items[1:]: + this_patcher = _patch( + getter, attribute, new, spec, create, spec_set, + autospec, new_callable, {} + ) + this_patcher.attribute_name = attribute + patcher.additional_patchers.append(this_patcher) + return patcher + + +def patch( + target, new=DEFAULT, spec=None, create=False, + spec_set=None, autospec=None, new_callable=None, **kwargs + ): + """ + `patch` acts as a function decorator, class decorator or a context + manager. Inside the body of the function or with statement, the `target` + is patched with a `new` object. When the function/with statement exits + the patch is undone. + + If `new` is omitted, then the target is replaced with a + `MagicMock`. If `patch` is used as a decorator and `new` is + omitted, the created mock is passed in as an extra argument to the + decorated function. If `patch` is used as a context manager the created + mock is returned by the context manager. + + `target` should be a string in the form `'package.module.ClassName'`. The + `target` is imported and the specified object replaced with the `new` + object, so the `target` must be importable from the environment you are + calling `patch` from. The target is imported when the decorated function + is executed, not at decoration time. + + The `spec` and `spec_set` keyword arguments are passed to the `MagicMock` + if patch is creating one for you. + + In addition you can pass `spec=True` or `spec_set=True`, which causes + patch to pass in the object being mocked as the spec/spec_set object. + + `new_callable` allows you to specify a different class, or callable object, + that will be called to create the `new` object. By default `MagicMock` is + used. + + A more powerful form of `spec` is `autospec`. If you set `autospec=True` + then the mock will be created with a spec from the object being replaced. + All attributes of the mock will also have the spec of the corresponding + attribute of the object being replaced. Methods and functions being + mocked will have their arguments checked and will raise a `TypeError` if + they are called with the wrong signature. For mocks replacing a class, + their return value (the 'instance') will have the same spec as the class. + + Instead of `autospec=True` you can pass `autospec=some_object` to use an + arbitrary object as the spec instead of the one being replaced. + + By default `patch` will fail to replace attributes that don't exist. 
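A sketch of that default failure and of the `create=True` escape hatch the docstring turns to next (the attribute name is invented):

import os

from mock import patch

try:
    patch('os.made_up_name', 42).start()    # attribute doesn't exist on os
except AttributeError:
    pass                                    # so patch refuses by default

with patch('os.made_up_name', 42, create=True):
    assert os.made_up_name == 42            # created for the patch's duration
assert not hasattr(os, 'made_up_name')      # and deleted again afterwards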
If + you pass in `create=True`, and the attribute doesn't exist, patch will + create the attribute for you when the patched function is called, and + delete it again afterwards. This is useful for writing tests against + attributes that your production code creates at runtime. It is off by + default because it can be dangerous. With it switched on you can write + passing tests against APIs that don't actually exist! + + Patch can be used as a `TestCase` class decorator. It works by + decorating each test method in the class. This reduces the boilerplate + code when your test methods share a common patchings set. `patch` finds + tests by looking for method names that start with `patch.TEST_PREFIX`. + By default this is `test`, which matches the way `unittest` finds tests. + You can specify an alternative prefix by setting `patch.TEST_PREFIX`. + + Patch can be used as a context manager, with the with statement. Here the + patching applies to the indented block after the with statement. If you + use "as" then the patched object will be bound to the name after the + "as"; very useful if `patch` is creating a mock object for you. + + `patch` takes arbitrary keyword arguments. These will be passed to + the `Mock` (or `new_callable`) on construction. + + `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are + available for alternate use-cases. + """ + getter, attribute = _get_target(target) + return _patch( + getter, attribute, new, spec, create, + spec_set, autospec, new_callable, kwargs + ) + + +class _patch_dict(object): + """ + Patch a dictionary, or dictionary like object, and restore the dictionary + to its original state after the test. + + `in_dict` can be a dictionary or a mapping like container. If it is a + mapping then it must at least support getting, setting and deleting items + plus iterating over keys. + + `in_dict` can also be a string specifying the name of the dictionary, which + will then be fetched by importing it. + + `values` can be a dictionary of values to set in the dictionary. `values` + can also be an iterable of `(key, value)` pairs. + + If `clear` is True then the dictionary will be cleared before the new + values are set. + + `patch.dict` can also be called with arbitrary keyword arguments to set + values in the dictionary:: + + with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()): + ... + + `patch.dict` can be used as a context manager, decorator or class + decorator. When used as a class decorator `patch.dict` honours + `patch.TEST_PREFIX` for choosing which methods to wrap. + """ + + def __init__(self, in_dict, values=(), clear=False, **kwargs): + if isinstance(in_dict, basestring): + in_dict = _importer(in_dict) + self.in_dict = in_dict + # support any argument supported by dict(...) 
constructor + self.values = dict(values) + self.values.update(kwargs) + self.clear = clear + self._original = None + + + def __call__(self, f): + if isinstance(f, ClassTypes): + return self.decorate_class(f) + @wraps(f) + def _inner(*args, **kw): + self._patch_dict() + try: + return f(*args, **kw) + finally: + self._unpatch_dict() + + return _inner + + + def decorate_class(self, klass): + for attr in dir(klass): + attr_value = getattr(klass, attr) + if (attr.startswith(patch.TEST_PREFIX) and + hasattr(attr_value, "__call__")): + decorator = _patch_dict(self.in_dict, self.values, self.clear) + decorated = decorator(attr_value) + setattr(klass, attr, decorated) + return klass + + + def __enter__(self): + """Patch the dict.""" + self._patch_dict() + + + def _patch_dict(self): + values = self.values + in_dict = self.in_dict + clear = self.clear + + try: + original = in_dict.copy() + except AttributeError: + # dict like object with no copy method + # must support iteration over keys + original = {} + for key in in_dict: + original[key] = in_dict[key] + self._original = original + + if clear: + _clear_dict(in_dict) + + try: + in_dict.update(values) + except AttributeError: + # dict like object with no update method + for key in values: + in_dict[key] = values[key] + + + def _unpatch_dict(self): + in_dict = self.in_dict + original = self._original + + _clear_dict(in_dict) + + try: + in_dict.update(original) + except AttributeError: + for key in original: + in_dict[key] = original[key] + + + def __exit__(self, *args): + """Unpatch the dict.""" + self._unpatch_dict() + return False + + start = __enter__ + stop = __exit__ + + +def _clear_dict(in_dict): + try: + in_dict.clear() + except AttributeError: + keys = list(in_dict) + for key in keys: + del in_dict[key] + + +def _patch_stopall(): + """Stop all active patches. 
LIFO to unroll nested patches.""" + for patch in reversed(_patch._active_patches): + patch.stop() + + +patch.object = _patch_object +patch.dict = _patch_dict +patch.multiple = _patch_multiple +patch.stopall = _patch_stopall +patch.TEST_PREFIX = 'test' + +magic_methods = ( + "lt le gt ge eq ne " + "getitem setitem delitem " + "len contains iter " + "hash str sizeof " + "enter exit " + # we added divmod and rdivmod here instead of numerics + # because there is no idivmod + "divmod rdivmod neg pos abs invert " + "complex int float index " + "trunc floor ceil " +) + +numerics = ( + "add sub mul matmul div floordiv mod lshift rshift and xor or pow" +) +if six.PY3: + numerics += ' truediv' +inplace = ' '.join('i%s' % n for n in numerics.split()) +right = ' '.join('r%s' % n for n in numerics.split()) +extra = '' +if six.PY3: + extra = 'bool next ' +else: + extra = 'unicode long nonzero oct hex truediv rtruediv ' + +# not including __prepare__, __instancecheck__, __subclasscheck__ +# (as they are metaclass methods) +# __del__ is not supported at all as it causes problems if it exists + +_non_defaults = set(( + '__cmp__', '__getslice__', '__setslice__', '__coerce__', # <3.x + '__get__', '__set__', '__delete__', '__reversed__', '__missing__', + '__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__', + '__getstate__', '__setstate__', '__getformat__', '__setformat__', + '__repr__', '__dir__', '__subclasses__', '__format__', +)) + + +def _get_method(name, func): + "Turns a callable object (like a mock) into a real function" + def method(self, *args, **kw): + return func(self, *args, **kw) + method.__name__ = name + return method + + +_magics = set( + '__%s__' % method for method in + ' '.join([magic_methods, numerics, inplace, right, extra]).split() +) + +_all_magics = _magics | _non_defaults + +_unsupported_magics = set(( + '__getattr__', '__setattr__', + '__init__', '__new__', '__prepare__' + '__instancecheck__', '__subclasscheck__', + '__del__' +)) + +_calculate_return_value = { + '__hash__': lambda self: object.__hash__(self), + '__str__': lambda self: object.__str__(self), + '__sizeof__': lambda self: object.__sizeof__(self), + '__unicode__': lambda self: unicode(object.__str__(self)), +} + +_return_values = { + '__lt__': NotImplemented, + '__gt__': NotImplemented, + '__le__': NotImplemented, + '__ge__': NotImplemented, + '__int__': 1, + '__contains__': False, + '__len__': 0, + '__exit__': False, + '__complex__': 1j, + '__float__': 1.0, + '__bool__': True, + '__nonzero__': True, + '__oct__': '1', + '__hex__': '0x1', + '__long__': long(1), + '__index__': 1, +} + + +def _get_eq(self): + def __eq__(other): + ret_val = self.__eq__._mock_return_value + if ret_val is not DEFAULT: + return ret_val + return self is other + return __eq__ + +def _get_ne(self): + def __ne__(other): + if self.__ne__._mock_return_value is not DEFAULT: + return DEFAULT + return self is not other + return __ne__ + +def _get_iter(self): + def __iter__(): + ret_val = self.__iter__._mock_return_value + if ret_val is DEFAULT: + return iter([]) + # if ret_val was already an iterator, then calling iter on it should + # return the iterator unchanged + return iter(ret_val) + return __iter__ + +_side_effect_methods = { + '__eq__': _get_eq, + '__ne__': _get_ne, + '__iter__': _get_iter, +} + + + +def _set_return_value(mock, method, name): + fixed = _return_values.get(name, DEFAULT) + if fixed is not DEFAULT: + method.return_value = fixed + return + + return_calulator = _calculate_return_value.get(name) + if return_calulator is 
not None: + try: + return_value = return_calulator(mock) + except AttributeError: + # XXXX why do we return AttributeError here? + # set it as a side_effect instead? + return_value = AttributeError(name) + method.return_value = return_value + return + + side_effector = _side_effect_methods.get(name) + if side_effector is not None: + method.side_effect = side_effector(mock) + + + +class MagicMixin(object): + def __init__(self, *args, **kw): + self._mock_set_magics() # make magic work for kwargs in init + _safe_super(MagicMixin, self).__init__(*args, **kw) + self._mock_set_magics() # fix magic broken by upper level init + + + def _mock_set_magics(self): + these_magics = _magics + + if getattr(self, "_mock_methods", None) is not None: + these_magics = _magics.intersection(self._mock_methods) + + remove_magics = set() + remove_magics = _magics - these_magics + + for entry in remove_magics: + if entry in type(self).__dict__: + # remove unneeded magic methods + delattr(self, entry) + + # don't overwrite existing attributes if called a second time + these_magics = these_magics - set(type(self).__dict__) + + _type = type(self) + for entry in these_magics: + setattr(_type, entry, MagicProxy(entry, self)) + + + +class NonCallableMagicMock(MagicMixin, NonCallableMock): + """A version of `MagicMock` that isn't callable.""" + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + + +class MagicMock(MagicMixin, Mock): + """ + MagicMock is a subclass of Mock with default implementations + of most of the magic methods. You can use MagicMock without having to + configure the magic methods yourself. + + If you use the `spec` or `spec_set` arguments then *only* magic + methods that exist in the spec will be created. + + Attributes and the return value of a `MagicMock` will also be `MagicMocks`. + """ + def mock_add_spec(self, spec, spec_set=False): + """Add a spec to a mock. `spec` can either be an object or a + list of strings. Only attributes on the `spec` can be fetched as + attributes from the mock. + + If `spec_set` is True then only attributes on the spec can be set.""" + self._mock_add_spec(spec, spec_set) + self._mock_set_magics() + + + +class MagicProxy(object): + def __init__(self, name, parent): + self.name = name + self.parent = parent + + def __call__(self, *args, **kwargs): + m = self.create_mock() + return m(*args, **kwargs) + + def create_mock(self): + entry = self.name + parent = self.parent + m = parent._get_child_mock(name=entry, _new_name=entry, + _new_parent=parent) + setattr(parent, entry, m) + _set_return_value(parent, m, entry) + return m + + def __get__(self, obj, _type=None): + return self.create_mock() + + + +class _ANY(object): + "A helper object that compares equal to everything." 
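In use, `ANY` stands in for arguments a test doesn't care about (a sketch; the call arguments are invented):

from mock import Mock, ANY

m = Mock()
m.log('payload', level=20)
m.log.assert_called_with(ANY, level=ANY)    # matches any argument values
assert ANY == object()                      # equality always holds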
+
+    def __eq__(self, other):
+        return True
+
+    def __ne__(self, other):
+        return False
+
+    def __repr__(self):
+        return '<ANY>'
+
+ANY = _ANY()
+
+
+
+def _format_call_signature(name, args, kwargs):
+    message = '%s(%%s)' % name
+    formatted_args = ''
+    args_string = ', '.join([repr(arg) for arg in args])
+
+    def encode_item(item):
+        if six.PY2 and isinstance(item, unicode):
+            return item.encode("utf-8")
+        else:
+            return item
+
+    kwargs_string = ', '.join([
+        '%s=%r' % (encode_item(key), value) for key, value in sorted(kwargs.items())
+    ])
+    if args_string:
+        formatted_args = args_string
+    if kwargs_string:
+        if formatted_args:
+            formatted_args += ', '
+        formatted_args += kwargs_string
+
+    return message % formatted_args
+
+
+
+class _Call(tuple):
+    """
+    A tuple for holding the results of a call to a mock, either in the form
+    `(args, kwargs)` or `(name, args, kwargs)`.
+
+    If args or kwargs are empty then a call tuple will compare equal to
+    a tuple without those values. This makes comparisons less verbose::
+
+        _Call(('name', (), {})) == ('name',)
+        _Call(('name', (1,), {})) == ('name', (1,))
+        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
+
+    The `_Call` object provides a useful shortcut for comparing with call::
+
+        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
+        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
+
+    If the _Call has no name then it will match any name.
+    """
+    def __new__(cls, value=(), name=None, parent=None, two=False,
+                from_kall=True):
+        name = ''
+        args = ()
+        kwargs = {}
+        _len = len(value)
+        if _len == 3:
+            name, args, kwargs = value
+        elif _len == 2:
+            first, second = value
+            if isinstance(first, basestring):
+                name = first
+                if isinstance(second, tuple):
+                    args = second
+                else:
+                    kwargs = second
+            else:
+                args, kwargs = first, second
+        elif _len == 1:
+            value, = value
+            if isinstance(value, basestring):
+                name = value
+            elif isinstance(value, tuple):
+                args = value
+            else:
+                kwargs = value
+
+        if two:
+            return tuple.__new__(cls, (args, kwargs))
+
+        return tuple.__new__(cls, (name, args, kwargs))
+
+
+    def __init__(self, value=(), name=None, parent=None, two=False,
+                 from_kall=True):
+        self.name = name
+        self.parent = parent
+        self.from_kall = from_kall
+
+
+    def __eq__(self, other):
+        if other is ANY:
+            return True
+        try:
+            len_other = len(other)
+        except TypeError:
+            return False
+
+        self_name = ''
+        if len(self) == 2:
+            self_args, self_kwargs = self
+        else:
+            self_name, self_args, self_kwargs = self
+
+        other_name = ''
+        if len_other == 0:
+            other_args, other_kwargs = (), {}
+        elif len_other == 3:
+            other_name, other_args, other_kwargs = other
+        elif len_other == 1:
+            value, = other
+            if isinstance(value, tuple):
+                other_args = value
+                other_kwargs = {}
+            elif isinstance(value, basestring):
+                other_name = value
+                other_args, other_kwargs = (), {}
+            else:
+                other_args = ()
+                other_kwargs = value
+        elif len_other == 2:
+            # could be (name, args) or (name, kwargs) or (args, kwargs)
+            first, second = other
+            if isinstance(first, basestring):
+                other_name = first
+                if isinstance(second, tuple):
+                    other_args, other_kwargs = second, {}
+                else:
+                    other_args, other_kwargs = (), second
+            else:
+                other_args, other_kwargs = first, second
+        else:
+            return False
+
+        if self_name and other_name != self_name:
+            return False
+
+        # this order is important for ANY to work!
+ return (other_args, other_kwargs) == (self_args, self_kwargs) + + + def __ne__(self, other): + return not self.__eq__(other) + + + def __call__(self, *args, **kwargs): + if self.name is None: + return _Call(('', args, kwargs), name='()') + + name = self.name + '()' + return _Call((self.name, args, kwargs), name=name, parent=self) + + + def __getattr__(self, attr): + if self.name is None: + return _Call(name=attr, from_kall=False) + name = '%s.%s' % (self.name, attr) + return _Call(name=name, parent=self, from_kall=False) + + + def count(self, *args, **kwargs): + return self.__getattr__('count')(*args, **kwargs) + + def index(self, *args, **kwargs): + return self.__getattr__('index')(*args, **kwargs) + + def __repr__(self): + if not self.from_kall: + name = self.name or 'call' + if name.startswith('()'): + name = 'call%s' % name + return name + + if len(self) == 2: + name = 'call' + args, kwargs = self + else: + name, args, kwargs = self + if not name: + name = 'call' + elif not name.startswith('()'): + name = 'call.%s' % name + else: + name = 'call%s' % name + return _format_call_signature(name, args, kwargs) + + + def call_list(self): + """For a call object that represents multiple calls, `call_list` + returns a list of all the intermediate calls as well as the + final call.""" + vals = [] + thing = self + while thing is not None: + if thing.from_kall: + vals.append(thing) + thing = thing.parent + return _CallList(reversed(vals)) + + +call = _Call(from_kall=False) + + + +def create_autospec(spec, spec_set=False, instance=False, _parent=None, + _name=None, **kwargs): + """Create a mock object using another object as a spec. Attributes on the + mock will use the corresponding attribute on the `spec` object as their + spec. + + Functions or methods being mocked will have their arguments checked + to check that they are called with the correct signature. + + If `spec_set` is True then attempting to set attributes that don't exist + on the spec object will raise an `AttributeError`. + + If a class is used as a spec then the return value of the mock (the + instance of the class) will have the same spec. You can use a class as the + spec for an instance object by passing `instance=True`. The returned mock + will only be callable if instances of the mock are callable. 
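A sketch of the class/instance distinction just described, with an invented `Service` class as the spec:

from mock import create_autospec

class Service(object):                  # invented spec class
    def fetch(self, url, retries=3):
        pass

svc_class = create_autospec(Service)
svc = svc_class()                       # the returned 'instance' shares the spec
svc.fetch('http://example.invalid')
svc.fetch.assert_called_once_with('http://example.invalid')
# svc.fetch(1, 2, 3, 4, 5) would raise TypeError: the signature is enforced

inst = create_autospec(Service, instance=True)
# inst() would raise TypeError: instances of Service are not callable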
+ + `create_autospec` also takes arbitrary keyword arguments that are passed to + the constructor of the created mock.""" + if _is_list(spec): + # can't pass a list instance to the mock constructor as it will be + # interpreted as a list of strings + spec = type(spec) + + is_type = isinstance(spec, ClassTypes) + + _kwargs = {'spec': spec} + if spec_set: + _kwargs = {'spec_set': spec} + elif spec is None: + # None we mock with a normal mock without a spec + _kwargs = {} + if _kwargs and instance: + _kwargs['_spec_as_instance'] = True + + _kwargs.update(kwargs) + + Klass = MagicMock + if type(spec) in DescriptorTypes: + # descriptors don't have a spec + # because we don't know what type they return + _kwargs = {} + elif not _callable(spec): + Klass = NonCallableMagicMock + elif is_type and instance and not _instance_callable(spec): + Klass = NonCallableMagicMock + + _name = _kwargs.pop('name', _name) + + _new_name = _name + if _parent is None: + # for a top level object no _new_name should be set + _new_name = '' + + mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name, + name=_name, **_kwargs) + + if isinstance(spec, FunctionTypes): + # should only happen at the top level because we don't + # recurse for functions + mock = _set_signature(mock, spec) + else: + _check_signature(spec, mock, is_type, instance) + + if _parent is not None and not instance: + _parent._mock_children[_name] = mock + + if is_type and not instance and 'return_value' not in kwargs: + mock.return_value = create_autospec(spec, spec_set, instance=True, + _name='()', _parent=mock) + + for entry in dir(spec): + if _is_magic(entry): + # MagicMock already does the useful magic methods for us + continue + + # XXXX do we need a better way of getting attributes without + # triggering code execution (?) Probably not - we need the actual + # object to mock it so we would rather trigger a property than mock + # the property descriptor. Likewise we want to mock out dynamically + # provided attributes. + # XXXX what about attributes that raise exceptions other than + # AttributeError on being fetched? + # we could be resilient against it, or catch and propagate the + # exception when the attribute is fetched from the mock + try: + original = getattr(spec, entry) + except AttributeError: + continue + + kwargs = {'spec': original} + if spec_set: + kwargs = {'spec_set': original} + + if not isinstance(original, FunctionTypes): + new = _SpecState(original, spec_set, mock, entry, instance) + mock._mock_children[entry] = new + else: + parent = mock + if isinstance(spec, FunctionTypes): + parent = mock.mock + + skipfirst = _must_skip(spec, entry, is_type) + kwargs['_eat_self'] = skipfirst + new = MagicMock(parent=parent, name=entry, _new_name=entry, + _new_parent=parent, + **kwargs) + mock._mock_children[entry] = new + _check_signature(original, new, skipfirst=skipfirst) + + # so functions created with _set_signature become instance attributes, + # *plus* their underlying mock exists in _mock_children of the parent + # mock. Adding to _mock_children may be unnecessary where we are also + # setting as an instance attribute? + if isinstance(new, FunctionTypes): + setattr(mock, entry, new) + + return mock + + +def _must_skip(spec, entry, is_type): + """ + Return whether we should skip the first argument on spec's `entry` + attribute. 
+    """
+    if not isinstance(spec, ClassTypes):
+        if entry in getattr(spec, '__dict__', {}):
+            # instance attribute - shouldn't skip
+            return False
+        spec = spec.__class__
+    if not hasattr(spec, '__mro__'):
+        # old style class: can't have descriptors anyway
+        return is_type
+
+    for klass in spec.__mro__:
+        result = klass.__dict__.get(entry, DEFAULT)
+        if result is DEFAULT:
+            continue
+        if isinstance(result, (staticmethod, classmethod)):
+            return False
+        elif isinstance(getattr(result, '__get__', None), MethodWrapperTypes):
+            # Normal method => skip if looked up on type
+            # (if looked up on instance, self is already skipped)
+            return is_type
+        else:
+            return False
+
+    # shouldn't get here unless function is a dynamically provided attribute
+    # XXXX untested behaviour
+    return is_type
+
+
+def _get_class(obj):
+    try:
+        return obj.__class__
+    except AttributeError:
+        # it is possible for objects to have no __class__
+        return type(obj)
+
+
+class _SpecState(object):
+
+    def __init__(self, spec, spec_set=False, parent=None,
+                 name=None, ids=None, instance=False):
+        self.spec = spec
+        self.ids = ids
+        self.spec_set = spec_set
+        self.parent = parent
+        self.instance = instance
+        self.name = name
+
+
+FunctionTypes = (
+    # python function
+    type(create_autospec),
+    # instance method
+    type(ANY.__eq__),
+)
+
+MethodWrapperTypes = (
+    type(ANY.__eq__.__get__),
+)
+
+
+file_spec = None
+
+def _iterate_read_data(read_data):
+    # Helper for mock_open:
+    # Retrieve lines from read_data via a generator so that separate calls to
+    # readline, read, and readlines are properly interleaved
+    sep = b'\n' if isinstance(read_data, bytes) else '\n'
+    data_as_list = [l + sep for l in read_data.split(sep)]
+
+    if data_as_list[-1] == sep:
+        # If the last line ended in a newline, the list comprehension will have an
+        # extra entry that's just a newline. Remove this.
+        data_as_list = data_as_list[:-1]
+    else:
+        # If there wasn't an extra newline by itself, then the file being
+        # emulated doesn't have a newline to end the last line remove the
+        # newline that our naive format() added
+        data_as_list[-1] = data_as_list[-1][:-1]
+
+    for line in data_as_list:
+        yield line
+
+def mock_open(mock=None, read_data=''):
+    """
+    A helper function to create a mock to replace the use of `open`. It works
+    for `open` called directly or used as a context manager.
+
+    The `mock` argument is the mock object to configure. If `None` (the
+    default) then a `MagicMock` will be created for you, with the API limited
+    to methods or attributes available on standard file handles.
+
+    `read_data` is a string for the `read`, `readline`, and `readlines` methods
+    of the file handle to return. This is an empty string by default.
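A usage sketch (the patched module, file name, and data are invented):

from mock import mock_open, patch

m = mock_open(read_data='alpha\nbeta\n')
with patch('%s.open' % __name__, m, create=True):
    with open('notes.txt') as f:        # served by the mock, not the filesystem
        assert f.readline() == 'alpha\n'
        assert f.read() == 'beta\n'     # read and readline share one position
m.assert_called_once_with('notes.txt')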
+ """ + def _readlines_side_effect(*args, **kwargs): + if handle.readlines.return_value is not None: + return handle.readlines.return_value + return list(_state[0]) + + def _read_side_effect(*args, **kwargs): + if handle.read.return_value is not None: + return handle.read.return_value + return type(read_data)().join(_state[0]) + + def _readline_side_effect(): + if handle.readline.return_value is not None: + while True: + yield handle.readline.return_value + for line in _state[0]: + yield line + + + global file_spec + if file_spec is None: + # set on first use + if six.PY3: + import _io + file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + else: + file_spec = file + + if mock is None: + mock = MagicMock(name='open', spec=open) + + handle = MagicMock(spec=file_spec) + handle.__enter__.return_value = handle + + _state = [_iterate_read_data(read_data), None] + + handle.write.return_value = None + handle.read.return_value = None + handle.readline.return_value = None + handle.readlines.return_value = None + + handle.read.side_effect = _read_side_effect + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + handle.readlines.side_effect = _readlines_side_effect + + def reset_data(*args, **kwargs): + _state[0] = _iterate_read_data(read_data) + if handle.readline.side_effect == _state[1]: + # Only reset the side effect if the user hasn't overridden it. + _state[1] = _readline_side_effect() + handle.readline.side_effect = _state[1] + return DEFAULT + + mock.side_effect = reset_data + mock.return_value = handle + return mock + + +class PropertyMock(Mock): + """ + A mock intended to be used as a property, or other descriptor, on a class. + `PropertyMock` provides `__get__` and `__set__` methods so you can specify + a return value when it is fetched. + + Fetching a `PropertyMock` instance from an object calls the mock, with + no args. Setting it calls the mock with the value being set. 
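A sketch of both directions, using an invented `Thing` class:

from mock import PropertyMock, patch

class Thing(object):                    # invented class under test
    @property
    def status(self):
        return 'live'

with patch.object(Thing, 'status', new_callable=PropertyMock) as status:
    status.return_value = 'stubbed'
    t = Thing()
    assert t.status == 'stubbed'        # fetching calls the mock with no args
    t.status = 'down'                   # setting calls the mock with the value
    status.assert_called_with('down')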
+ """ + def _get_child_mock(self, **kwargs): + return MagicMock(**kwargs) + + def __get__(self, obj, obj_type): + return self() + def __set__(self, obj, val): + self(val) diff --git a/ext/mock/tests/__init__.py b/ext/mock/tests/__init__.py new file mode 100644 index 0000000000..54ddf2ecc1 --- /dev/null +++ b/ext/mock/tests/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ diff --git a/ext/mock/tests/__main__.py b/ext/mock/tests/__main__.py new file mode 100644 index 0000000000..45c633a4ee --- /dev/null +++ b/ext/mock/tests/__main__.py @@ -0,0 +1,18 @@ +import os +import unittest + + +def load_tests(loader, standard_tests, pattern): + # top level directory cached on loader instance + this_dir = os.path.dirname(__file__) + pattern = pattern or "test*.py" + # We are inside unittest.test.testmock, so the top-level is three notches up + top_level_dir = os.path.dirname(os.path.dirname(os.path.dirname(this_dir))) + package_tests = loader.discover(start_dir=this_dir, pattern=pattern, + top_level_dir=top_level_dir) + standard_tests.addTests(package_tests) + return standard_tests + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/support.py b/ext/mock/tests/support.py new file mode 100644 index 0000000000..8e2082ff89 --- /dev/null +++ b/ext/mock/tests/support.py @@ -0,0 +1,36 @@ +import sys + +info = sys.version_info +import unittest2 + + +try: + callable = callable +except NameError: + def callable(obj): + return hasattr(obj, '__call__') + + +with_available = sys.version_info[:2] >= (2, 5) + + +def is_instance(obj, klass): + """Version of is_instance that doesn't access __class__""" + return issubclass(type(obj), klass) + + +class SomeClass(object): + class_attribute = None + + def wibble(self): + pass + + +class X(object): + pass + +try: + next = next +except NameError: + def next(obj): + return obj.next() diff --git a/ext/mock/tests/testcallable.py b/ext/mock/tests/testcallable.py new file mode 100644 index 0000000000..10acdc3517 --- /dev/null +++ b/ext/mock/tests/testcallable.py @@ -0,0 +1,158 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +import unittest2 as unittest +from mock.tests.support import is_instance, X, SomeClass + +from mock import ( + Mock, MagicMock, NonCallableMagicMock, + NonCallableMock, patch, create_autospec, + CallableMixin +) + + + +class TestCallable(unittest.TestCase): + + def assertNotCallable(self, mock): + self.assertTrue(is_instance(mock, NonCallableMagicMock)) + self.assertFalse(is_instance(mock, CallableMixin)) + + + def test_non_callable(self): + for mock in NonCallableMagicMock(), NonCallableMock(): + self.assertRaises(TypeError, mock) + self.assertFalse(hasattr(mock, '__call__')) + self.assertIn(mock.__class__.__name__, repr(mock)) + + + def test_heirarchy(self): + self.assertTrue(issubclass(MagicMock, Mock)) + self.assertTrue(issubclass(NonCallableMagicMock, NonCallableMock)) + + + def test_attributes(self): + one = NonCallableMock() + self.assertTrue(issubclass(type(one.one), Mock)) + + two = NonCallableMagicMock() + self.assertTrue(issubclass(type(two.two), MagicMock)) + + + def test_subclasses(self): + class MockSub(Mock): + pass + + one = MockSub() + self.assertTrue(issubclass(type(one.one), MockSub)) + + class MagicSub(MagicMock): + pass + + two = MagicSub() + 
        self.assertTrue(issubclass(type(two.two), MagicSub))
+
+
+    def test_patch_spec(self):
+        patcher = patch('%s.X' % __name__, spec=True)
+        mock = patcher.start()
+        self.addCleanup(patcher.stop)
+
+        instance = mock()
+        mock.assert_called_once_with()
+
+        self.assertNotCallable(instance)
+        self.assertRaises(TypeError, instance)
+
+
+    def test_patch_spec_set(self):
+        patcher = patch('%s.X' % __name__, spec_set=True)
+        mock = patcher.start()
+        self.addCleanup(patcher.stop)
+
+        instance = mock()
+        mock.assert_called_once_with()
+
+        self.assertNotCallable(instance)
+        self.assertRaises(TypeError, instance)
+
+
+    def test_patch_spec_instance(self):
+        patcher = patch('%s.X' % __name__, spec=X())
+        mock = patcher.start()
+        self.addCleanup(patcher.stop)
+
+        self.assertNotCallable(mock)
+        self.assertRaises(TypeError, mock)
+
+
+    def test_patch_spec_set_instance(self):
+        patcher = patch('%s.X' % __name__, spec_set=X())
+        mock = patcher.start()
+        self.addCleanup(patcher.stop)
+
+        self.assertNotCallable(mock)
+        self.assertRaises(TypeError, mock)
+
+
+    def test_patch_spec_callable_class(self):
+        class CallableX(X):
+            def __call__(self):
+                pass
+
+        class Sub(CallableX):
+            pass
+
+        class Multi(SomeClass, Sub):
+            pass
+
+        class OldStyle:
+            def __call__(self):
+                pass
+
+        class OldStyleSub(OldStyle):
+            pass
+
+        for arg in 'spec', 'spec_set':
+            for Klass in CallableX, Sub, Multi, OldStyle, OldStyleSub:
+                with patch('%s.X' % __name__, **{arg: Klass}) as mock:
+                    instance = mock()
+                    mock.assert_called_once_with()
+
+                    self.assertTrue(is_instance(instance, MagicMock))
+                    # inherited spec
+                    self.assertRaises(AttributeError, getattr, instance,
+                                      'foobarbaz')
+
+                    result = instance()
+                    # instance is callable, result has no spec
+                    instance.assert_called_once_with()
+
+                    result(3, 2, 1)
+                    result.assert_called_once_with(3, 2, 1)
+                    result.foo(3, 2, 1)
+                    result.foo.assert_called_once_with(3, 2, 1)
+
+
+    def test_create_autospec(self):
+        mock = create_autospec(X)
+        instance = mock()
+        self.assertRaises(TypeError, instance)
+
+        mock = create_autospec(X())
+        self.assertRaises(TypeError, mock)
+
+
+    def test_create_autospec_instance(self):
+        mock = create_autospec(SomeClass, instance=True)
+
+        self.assertRaises(TypeError, mock)
+        mock.wibble()
+        mock.wibble.assert_called_once_with()
+
+        self.assertRaises(TypeError, mock.wibble, 'some', 'args')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/ext/mock/tests/testhelpers.py b/ext/mock/tests/testhelpers.py
new file mode 100644
index 0000000000..a87df1b3b2
--- /dev/null
+++ b/ext/mock/tests/testhelpers.py
@@ -0,0 +1,975 @@
+# Copyright (C) 2007-2012 Michael Foord & the mock team
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+# http://www.voidspace.org.uk/python/mock/
+
+import six
+import unittest2 as unittest
+
+from mock import (
+    call, create_autospec, MagicMock,
+    Mock, ANY, patch, PropertyMock
+)
+from mock.mock import _Call, _CallList
+
+from datetime import datetime
+
+class SomeClass(object):
+    def one(self, a, b):
+        pass
+    def two(self):
+        pass
+    def three(self, a=None):
+        pass
+
+
+
+class AnyTest(unittest.TestCase):
+
+    def test_any(self):
+        self.assertEqual(ANY, object())
+
+        mock = Mock()
+        mock(ANY)
+        mock.assert_called_with(ANY)
+
+        mock = Mock()
+        mock(foo=ANY)
+        mock.assert_called_with(foo=ANY)
+
+    def test_repr(self):
+        self.assertEqual(repr(ANY), '<ANY>')
+        self.assertEqual(str(ANY), '<ANY>')
+
+
+    def test_any_and_datetime(self):
+        mock = Mock()
+        mock(datetime.now(), foo=datetime.now())
+
+        mock.assert_called_with(ANY, foo=ANY)
+
+
+    def
test_any_mock_calls_comparison_order(self): + mock = Mock() + d = datetime.now() + class Foo(object): + def __eq__(self, other): + return False + def __ne__(self, other): + return True + + for d in datetime.now(), Foo(): + mock.reset_mock() + + mock(d, foo=d, bar=d) + mock.method(d, zinga=d, alpha=d) + mock().method(a1=d, z99=d) + + expected = [ + call(ANY, foo=ANY, bar=ANY), + call.method(ANY, zinga=ANY, alpha=ANY), + call(), call().method(a1=ANY, z99=ANY) + ] + self.assertEqual(expected, mock.mock_calls) + self.assertEqual(mock.mock_calls, expected) + + + +class CallTest(unittest.TestCase): + + def test_call_with_call(self): + kall = _Call() + self.assertEqual(kall, _Call()) + self.assertEqual(kall, _Call(('',))) + self.assertEqual(kall, _Call(((),))) + self.assertEqual(kall, _Call(({},))) + self.assertEqual(kall, _Call(('', ()))) + self.assertEqual(kall, _Call(('', {}))) + self.assertEqual(kall, _Call(('', (), {}))) + self.assertEqual(kall, _Call(('foo',))) + self.assertEqual(kall, _Call(('bar', ()))) + self.assertEqual(kall, _Call(('baz', {}))) + self.assertEqual(kall, _Call(('spam', (), {}))) + + kall = _Call(((1, 2, 3),)) + self.assertEqual(kall, _Call(((1, 2, 3),))) + self.assertEqual(kall, _Call(('', (1, 2, 3)))) + self.assertEqual(kall, _Call(((1, 2, 3), {}))) + self.assertEqual(kall, _Call(('', (1, 2, 3), {}))) + + kall = _Call(((1, 2, 4),)) + self.assertNotEqual(kall, _Call(('', (1, 2, 3)))) + self.assertNotEqual(kall, _Call(('', (1, 2, 3), {}))) + + kall = _Call(('foo', (1, 2, 4),)) + self.assertNotEqual(kall, _Call(('', (1, 2, 4)))) + self.assertNotEqual(kall, _Call(('', (1, 2, 4), {}))) + self.assertNotEqual(kall, _Call(('bar', (1, 2, 4)))) + self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {}))) + + kall = _Call(({'a': 3},)) + self.assertEqual(kall, _Call(('', (), {'a': 3}))) + self.assertEqual(kall, _Call(('', {'a': 3}))) + self.assertEqual(kall, _Call(((), {'a': 3}))) + self.assertEqual(kall, _Call(({'a': 3},))) + + + def test_empty__Call(self): + args = _Call() + + self.assertEqual(args, ()) + self.assertEqual(args, ('foo',)) + self.assertEqual(args, ((),)) + self.assertEqual(args, ('foo', ())) + self.assertEqual(args, ('foo',(), {})) + self.assertEqual(args, ('foo', {})) + self.assertEqual(args, ({},)) + + + def test_named_empty_call(self): + args = _Call(('foo', (), {})) + + self.assertEqual(args, ('foo',)) + self.assertEqual(args, ('foo', ())) + self.assertEqual(args, ('foo',(), {})) + self.assertEqual(args, ('foo', {})) + + self.assertNotEqual(args, ((),)) + self.assertNotEqual(args, ()) + self.assertNotEqual(args, ({},)) + self.assertNotEqual(args, ('bar',)) + self.assertNotEqual(args, ('bar', ())) + self.assertNotEqual(args, ('bar', {})) + + + def test_call_with_args(self): + args = _Call(((1, 2, 3), {})) + + self.assertEqual(args, ((1, 2, 3),)) + self.assertEqual(args, ('foo', (1, 2, 3))) + self.assertEqual(args, ('foo', (1, 2, 3), {})) + self.assertEqual(args, ((1, 2, 3), {})) + + + def test_named_call_with_args(self): + args = _Call(('foo', (1, 2, 3), {})) + + self.assertEqual(args, ('foo', (1, 2, 3))) + self.assertEqual(args, ('foo', (1, 2, 3), {})) + + self.assertNotEqual(args, ((1, 2, 3),)) + self.assertNotEqual(args, ((1, 2, 3), {})) + + + def test_call_with_kwargs(self): + args = _Call(((), dict(a=3, b=4))) + + self.assertEqual(args, (dict(a=3, b=4),)) + self.assertEqual(args, ('foo', dict(a=3, b=4))) + self.assertEqual(args, ('foo', (), dict(a=3, b=4))) + self.assertEqual(args, ((), dict(a=3, b=4))) + + + def test_named_call_with_kwargs(self): + args = 
_Call(('foo', (), dict(a=3, b=4))) + + self.assertEqual(args, ('foo', dict(a=3, b=4))) + self.assertEqual(args, ('foo', (), dict(a=3, b=4))) + + self.assertNotEqual(args, (dict(a=3, b=4),)) + self.assertNotEqual(args, ((), dict(a=3, b=4))) + + + def test_call_with_args_call_empty_name(self): + args = _Call(((1, 2, 3), {})) + self.assertEqual(args, call(1, 2, 3)) + self.assertEqual(call(1, 2, 3), args) + self.assertIn(call(1, 2, 3), [args]) + + + def test_call_ne(self): + self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2)) + self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3)) + self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3)) + + + def test_call_non_tuples(self): + kall = _Call(((1, 2, 3),)) + for value in 1, None, self, int: + self.assertNotEqual(kall, value) + self.assertFalse(kall == value) + + + def test_repr(self): + self.assertEqual(repr(_Call()), 'call()') + self.assertEqual(repr(_Call(('foo',))), 'call.foo()') + + self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))), + "call(1, 2, 3, a='b')") + self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))), + "call.bar(1, 2, 3, a='b')") + + self.assertEqual(repr(call), 'call') + self.assertEqual(str(call), 'call') + + self.assertEqual(repr(call()), 'call()') + self.assertEqual(repr(call(1)), 'call(1)') + self.assertEqual(repr(call(zz='thing')), "call(zz='thing')") + + self.assertEqual(repr(call().foo), 'call().foo') + self.assertEqual(repr(call(1).foo.bar(a=3).bing), + 'call().foo.bar().bing') + self.assertEqual( + repr(call().foo(1, 2, a=3)), + "call().foo(1, 2, a=3)" + ) + self.assertEqual(repr(call()()), "call()()") + self.assertEqual(repr(call(1)(2)), "call()(2)") + self.assertEqual( + repr(call()().bar().baz.beep(1)), + "call()().bar().baz.beep(1)" + ) + + + def test_call(self): + self.assertEqual(call(), ('', (), {})) + self.assertEqual(call('foo', 'bar', one=3, two=4), + ('', ('foo', 'bar'), {'one': 3, 'two': 4})) + + mock = Mock() + mock(1, 2, 3) + mock(a=3, b=6) + self.assertEqual(mock.call_args_list, + [call(1, 2, 3), call(a=3, b=6)]) + + def test_attribute_call(self): + self.assertEqual(call.foo(1), ('foo', (1,), {})) + self.assertEqual(call.bar.baz(fish='eggs'), + ('bar.baz', (), {'fish': 'eggs'})) + + mock = Mock() + mock.foo(1, 2 ,3) + mock.bar.baz(a=3, b=6) + self.assertEqual(mock.method_calls, + [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)]) + + + def test_extended_call(self): + result = call(1).foo(2).bar(3, a=4) + self.assertEqual(result, ('().foo().bar', (3,), dict(a=4))) + + mock = MagicMock() + mock(1, 2, a=3, b=4) + self.assertEqual(mock.call_args, call(1, 2, a=3, b=4)) + self.assertNotEqual(mock.call_args, call(1, 2, 3)) + + self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)]) + self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)]) + + mock = MagicMock() + mock.foo(1).bar()().baz.beep(a=6) + + last_call = call.foo(1).bar()().baz.beep(a=6) + self.assertEqual(mock.mock_calls[-1], last_call) + self.assertEqual(mock.mock_calls, last_call.call_list()) + + + def test_call_list(self): + mock = MagicMock() + mock(1) + self.assertEqual(call(1).call_list(), mock.mock_calls) + + mock = MagicMock() + mock(1).method(2) + self.assertEqual(call(1).method(2).call_list(), + mock.mock_calls) + + mock = MagicMock() + mock(1).method(2)(3) + self.assertEqual(call(1).method(2)(3).call_list(), + mock.mock_calls) + + mock = MagicMock() + int(mock(1).method(2)(3).foo.bar.baz(4)(5)) + kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__() + self.assertEqual(kall.call_list(), mock.mock_calls) + + + def 
test_call_any(self): + self.assertEqual(call, ANY) + + m = MagicMock() + int(m) + self.assertEqual(m.mock_calls, [ANY]) + self.assertEqual([ANY], m.mock_calls) + + + def test_two_args_call(self): + args = _Call(((1, 2), {'a': 3}), two=True) + self.assertEqual(len(args), 2) + self.assertEqual(args[0], (1, 2)) + self.assertEqual(args[1], {'a': 3}) + + other_args = _Call(((1, 2), {'a': 3})) + self.assertEqual(args, other_args) + + +class SpecSignatureTest(unittest.TestCase): + + def _check_someclass_mock(self, mock): + self.assertRaises(AttributeError, getattr, mock, 'foo') + mock.one(1, 2) + mock.one.assert_called_with(1, 2) + self.assertRaises(AssertionError, + mock.one.assert_called_with, 3, 4) + self.assertRaises(TypeError, mock.one, 1) + + mock.two() + mock.two.assert_called_with() + self.assertRaises(AssertionError, + mock.two.assert_called_with, 3) + self.assertRaises(TypeError, mock.two, 1) + + mock.three() + mock.three.assert_called_with() + self.assertRaises(AssertionError, + mock.three.assert_called_with, 3) + self.assertRaises(TypeError, mock.three, 3, 2) + + mock.three(1) + mock.three.assert_called_with(1) + + mock.three(a=1) + mock.three.assert_called_with(a=1) + + + def test_basic(self): + mock = create_autospec(SomeClass) + self._check_someclass_mock(mock) + mock = create_autospec(SomeClass()) + self._check_someclass_mock(mock) + + + def test_create_autospec_return_value(self): + def f(): + pass + mock = create_autospec(f, return_value='foo') + self.assertEqual(mock(), 'foo') + + class Foo(object): + pass + + mock = create_autospec(Foo, return_value='foo') + self.assertEqual(mock(), 'foo') + + + def test_autospec_reset_mock(self): + m = create_autospec(int) + int(m) + m.reset_mock() + self.assertEqual(m.__int__.call_count, 0) + + + def test_mocking_unbound_methods(self): + class Foo(object): + def foo(self, foo): + pass + p = patch.object(Foo, 'foo') + mock_foo = p.start() + Foo().foo(1) + + mock_foo.assert_called_with(1) + + + @unittest.expectedFailure + def test_create_autospec_unbound_methods(self): + # see mock issue 128 + class Foo(object): + def foo(self): + pass + + klass = create_autospec(Foo) + instance = klass() + self.assertRaises(TypeError, instance.foo, 1) + + # Note: no type checking on the "self" parameter + klass.foo(1) + klass.foo.assert_called_with(1) + self.assertRaises(TypeError, klass.foo) + + + def test_create_autospec_keyword_arguments(self): + class Foo(object): + a = 3 + m = create_autospec(Foo, a='3') + self.assertEqual(m.a, '3') + + @unittest.skipUnless(six.PY3, "Keyword only arguments Python 3 specific") + def test_create_autospec_keyword_only_arguments(self): + func_def = "def foo(a, *, b=None):\n pass\n" + namespace = {} + exec (func_def, namespace) + foo = namespace['foo'] + + m = create_autospec(foo) + m(1) + m.assert_called_with(1) + self.assertRaises(TypeError, m, 1, 2) + + m(2, b=3) + m.assert_called_with(2, b=3) + + def test_function_as_instance_attribute(self): + obj = SomeClass() + def f(a): + pass + obj.f = f + + mock = create_autospec(obj) + mock.f('bing') + mock.f.assert_called_with('bing') + + + def test_spec_as_list(self): + # because spec as a list of strings in the mock constructor means + # something very different we treat a list instance as the type. 
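+        # To make the distinction concrete -- a hedged sketch, with 'frob'
+        # as a purely illustrative attribute name:
+        #
+        #     named = Mock(spec=['frob'])    # strings name allowed attributes
+        #     named.frob()                   # ok; named.append would raise
+        #                                    # AttributeError
+        #     as_list = create_autospec([])  # an instance: spec'd as a list
+        #     as_list.append('x')            # list methods exist,
+        #     as_list.frob                   # unknown names raise AttributeError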
+ mock = create_autospec([]) + mock.append('foo') + mock.append.assert_called_with('foo') + + self.assertRaises(AttributeError, getattr, mock, 'foo') + + class Foo(object): + foo = [] + + mock = create_autospec(Foo) + mock.foo.append(3) + mock.foo.append.assert_called_with(3) + self.assertRaises(AttributeError, getattr, mock.foo, 'foo') + + + def test_attributes(self): + class Sub(SomeClass): + attr = SomeClass() + + sub_mock = create_autospec(Sub) + + for mock in (sub_mock, sub_mock.attr): + self._check_someclass_mock(mock) + + + def test_builtin_functions_types(self): + # we could replace builtin functions / methods with a function + # with *args / **kwargs signature. Using the builtin method type + # as a spec seems to work fairly well though. + class BuiltinSubclass(list): + def bar(self, arg): + pass + sorted = sorted + attr = {} + + mock = create_autospec(BuiltinSubclass) + mock.append(3) + mock.append.assert_called_with(3) + self.assertRaises(AttributeError, getattr, mock.append, 'foo') + + mock.bar('foo') + mock.bar.assert_called_with('foo') + self.assertRaises(TypeError, mock.bar, 'foo', 'bar') + self.assertRaises(AttributeError, getattr, mock.bar, 'foo') + + mock.sorted([1, 2]) + mock.sorted.assert_called_with([1, 2]) + self.assertRaises(AttributeError, getattr, mock.sorted, 'foo') + + mock.attr.pop(3) + mock.attr.pop.assert_called_with(3) + self.assertRaises(AttributeError, getattr, mock.attr, 'foo') + + + def test_method_calls(self): + class Sub(SomeClass): + attr = SomeClass() + + mock = create_autospec(Sub) + mock.one(1, 2) + mock.two() + mock.three(3) + + expected = [call.one(1, 2), call.two(), call.three(3)] + self.assertEqual(mock.method_calls, expected) + + mock.attr.one(1, 2) + mock.attr.two() + mock.attr.three(3) + + expected.extend( + [call.attr.one(1, 2), call.attr.two(), call.attr.three(3)] + ) + self.assertEqual(mock.method_calls, expected) + + + def test_magic_methods(self): + class BuiltinSubclass(list): + attr = {} + + mock = create_autospec(BuiltinSubclass) + self.assertEqual(list(mock), []) + self.assertRaises(TypeError, int, mock) + self.assertRaises(TypeError, int, mock.attr) + self.assertEqual(list(mock), []) + + self.assertIsInstance(mock['foo'], MagicMock) + self.assertIsInstance(mock.attr['foo'], MagicMock) + + + def test_spec_set(self): + class Sub(SomeClass): + attr = SomeClass() + + for spec in (Sub, Sub()): + mock = create_autospec(spec, spec_set=True) + self._check_someclass_mock(mock) + + self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar') + self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar') + + + def test_descriptors(self): + class Foo(object): + @classmethod + def f(cls, a, b): + pass + @staticmethod + def g(a, b): + pass + + class Bar(Foo): + pass + + class Baz(SomeClass, Bar): + pass + + for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()): + mock = create_autospec(spec) + mock.f(1, 2) + mock.f.assert_called_once_with(1, 2) + + mock.g(3, 4) + mock.g.assert_called_once_with(3, 4) + + + @unittest.skipIf(six.PY3, "No old style classes in Python 3") + def test_old_style_classes(self): + class Foo: + def f(self, a, b): + pass + + class Bar(Foo): + g = Foo() + + for spec in (Foo, Foo(), Bar, Bar()): + mock = create_autospec(spec) + mock.f(1, 2) + mock.f.assert_called_once_with(1, 2) + + self.assertRaises(AttributeError, getattr, mock, 'foo') + self.assertRaises(AttributeError, getattr, mock.f, 'foo') + + mock.g.f(1, 2) + mock.g.f.assert_called_once_with(1, 2) + self.assertRaises(AttributeError, getattr, mock.g, 'foo') + + + def 
test_recursive(self): + class A(object): + def a(self): + pass + foo = 'foo bar baz' + bar = foo + + A.B = A + mock = create_autospec(A) + + mock() + self.assertFalse(mock.B.called) + + mock.a() + mock.B.a() + self.assertEqual(mock.method_calls, [call.a(), call.B.a()]) + + self.assertIs(A.foo, A.bar) + self.assertIsNot(mock.foo, mock.bar) + mock.foo.lower() + self.assertRaises(AssertionError, mock.bar.lower.assert_called_with) + + + def test_spec_inheritance_for_classes(self): + class Foo(object): + def a(self, x): + pass + class Bar(object): + def f(self, y): + pass + + class_mock = create_autospec(Foo) + + self.assertIsNot(class_mock, class_mock()) + + for this_mock in class_mock, class_mock(): + this_mock.a(x=5) + this_mock.a.assert_called_with(x=5) + this_mock.a.assert_called_with(5) + self.assertRaises(TypeError, this_mock.a, 'foo', 'bar') + self.assertRaises(AttributeError, getattr, this_mock, 'b') + + instance_mock = create_autospec(Foo()) + instance_mock.a(5) + instance_mock.a.assert_called_with(5) + instance_mock.a.assert_called_with(x=5) + self.assertRaises(TypeError, instance_mock.a, 'foo', 'bar') + self.assertRaises(AttributeError, getattr, instance_mock, 'b') + + # The return value isn't isn't callable + self.assertRaises(TypeError, instance_mock) + + instance_mock.Bar.f(6) + instance_mock.Bar.f.assert_called_with(6) + instance_mock.Bar.f.assert_called_with(y=6) + self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g') + + instance_mock.Bar().f(6) + instance_mock.Bar().f.assert_called_with(6) + instance_mock.Bar().f.assert_called_with(y=6) + self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g') + + + def test_inherit(self): + class Foo(object): + a = 3 + + Foo.Foo = Foo + + # class + mock = create_autospec(Foo) + instance = mock() + self.assertRaises(AttributeError, getattr, instance, 'b') + + attr_instance = mock.Foo() + self.assertRaises(AttributeError, getattr, attr_instance, 'b') + + # instance + mock = create_autospec(Foo()) + self.assertRaises(AttributeError, getattr, mock, 'b') + self.assertRaises(TypeError, mock) + + # attribute instance + call_result = mock.Foo() + self.assertRaises(AttributeError, getattr, call_result, 'b') + + + def test_builtins(self): + # used to fail with infinite recursion + create_autospec(1) + + create_autospec(int) + create_autospec('foo') + create_autospec(str) + create_autospec({}) + create_autospec(dict) + create_autospec([]) + create_autospec(list) + create_autospec(set()) + create_autospec(set) + create_autospec(1.0) + create_autospec(float) + create_autospec(1j) + create_autospec(complex) + create_autospec(False) + create_autospec(True) + + + def test_function(self): + def f(a, b): + pass + + mock = create_autospec(f) + self.assertRaises(TypeError, mock) + mock(1, 2) + mock.assert_called_with(1, 2) + mock.assert_called_with(1, b=2) + mock.assert_called_with(a=1, b=2) + + f.f = f + mock = create_autospec(f) + self.assertRaises(TypeError, mock.f) + mock.f(3, 4) + mock.f.assert_called_with(3, 4) + mock.f.assert_called_with(a=3, b=4) + + + def test_skip_attributeerrors(self): + class Raiser(object): + def __get__(self, obj, type=None): + if obj is None: + raise AttributeError('Can only be accessed via an instance') + + class RaiserClass(object): + raiser = Raiser() + + @staticmethod + def existing(a, b): + return a + b + + s = create_autospec(RaiserClass) + self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3)) + s.existing(1, 2) + self.assertRaises(AttributeError, lambda: s.nonexisting) + + # check we can 
fetch the raiser attribute and it has no spec + obj = s.raiser + obj.foo, obj.bar + + + def test_signature_class(self): + class Foo(object): + def __init__(self, a, b=3): + pass + + mock = create_autospec(Foo) + + self.assertRaises(TypeError, mock) + mock(1) + mock.assert_called_once_with(1) + + mock(4, 5) + mock.assert_called_with(4, 5) + + + @unittest.skipIf(six.PY3, 'no old style classes in Python 3') + def test_signature_old_style_class(self): + class Foo: + def __init__(self, a, b=3): + pass + + mock = create_autospec(Foo) + + self.assertRaises(TypeError, mock) + mock(1) + mock.assert_called_once_with(1) + mock.assert_called_once_with(a=1) + self.assertRaises(AssertionError, mock.assert_called_once_with, 2) + + mock(4, 5) + mock.assert_called_with(4, 5) + mock.assert_called_with(a=4, b=5) + self.assertRaises(AssertionError, mock.assert_called_with, a=5, b=4) + + + def test_class_with_no_init(self): + # this used to raise an exception + # due to trying to get a signature from object.__init__ + class Foo(object): + pass + create_autospec(Foo) + + + @unittest.skipIf(six.PY3, 'no old style classes in Python 3') + def test_old_style_class_with_no_init(self): + # this used to raise an exception + # due to Foo.__init__ raising an AttributeError + class Foo: + pass + create_autospec(Foo) + + + def test_signature_callable(self): + class Callable(object): + def __init__(self, x, y): + pass + def __call__(self, a): + pass + + mock = create_autospec(Callable) + mock(1, 2) + mock.assert_called_once_with(1, 2) + mock.assert_called_once_with(x=1, y=2) + self.assertRaises(TypeError, mock, 'a') + + instance = mock(1, 2) + self.assertRaises(TypeError, instance) + instance(a='a') + instance.assert_called_once_with('a') + instance.assert_called_once_with(a='a') + instance('a') + instance.assert_called_with('a') + instance.assert_called_with(a='a') + + mock = create_autospec(Callable(1, 2)) + mock(a='a') + mock.assert_called_once_with(a='a') + self.assertRaises(TypeError, mock) + mock('a') + mock.assert_called_with('a') + + + def test_signature_noncallable(self): + class NonCallable(object): + def __init__(self): + pass + + mock = create_autospec(NonCallable) + instance = mock() + mock.assert_called_once_with() + self.assertRaises(TypeError, mock, 'a') + self.assertRaises(TypeError, instance) + self.assertRaises(TypeError, instance, 'a') + + mock = create_autospec(NonCallable()) + self.assertRaises(TypeError, mock) + self.assertRaises(TypeError, mock, 'a') + + + def test_create_autospec_none(self): + class Foo(object): + bar = None + + mock = create_autospec(Foo) + none = mock.bar + self.assertNotIsInstance(none, type(None)) + + none.foo() + none.foo.assert_called_once_with() + + + def test_autospec_functions_with_self_in_odd_place(self): + class Foo(object): + def f(a, self): + pass + + a = create_autospec(Foo) + a.f(10) + a.f.assert_called_with(10) + a.f.assert_called_with(self=10) + a.f(self=10) + a.f.assert_called_with(10) + a.f.assert_called_with(self=10) + + + def test_autospec_property(self): + class Foo(object): + @property + def foo(self): + return 3 + + foo = create_autospec(Foo) + mock_property = foo.foo + + # no spec on properties + self.assertIsInstance(mock_property, MagicMock) + mock_property(1, 2, 3) + mock_property.abc(4, 5, 6) + mock_property.assert_called_once_with(1, 2, 3) + mock_property.abc.assert_called_once_with(4, 5, 6) + + + def test_autospec_slots(self): + class Foo(object): + __slots__ = ['a'] + + foo = create_autospec(Foo) + mock_slot = foo.a + + # no spec on slots + 
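+        # Because neither slot attributes nor properties carry a spec under
+        # create_autospec, a PropertyMock is the usual way to pin down such
+        # an attribute's value instead -- a sketch against the public API,
+        # with HasProp as a purely illustrative class:
+        #
+        #     class HasProp(object):
+        #         @property
+        #         def foo(self):
+        #             return 3
+        #
+        #     with patch.object(HasProp, 'foo', new_callable=PropertyMock) as p:
+        #         p.return_value = 42
+        #         assert HasProp().foo == 42
+        #         p.assert_called_once_with()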
mock_slot(1, 2, 3) + mock_slot.abc(4, 5, 6) + mock_slot.assert_called_once_with(1, 2, 3) + mock_slot.abc.assert_called_once_with(4, 5, 6) + + +class TestCallList(unittest.TestCase): + + def test_args_list_contains_call_list(self): + mock = Mock() + self.assertIsInstance(mock.call_args_list, _CallList) + + mock(1, 2) + mock(a=3) + mock(3, 4) + mock(b=6) + + for kall in call(1, 2), call(a=3), call(3, 4), call(b=6): + self.assertIn(kall, mock.call_args_list) + + calls = [call(a=3), call(3, 4)] + self.assertIn(calls, mock.call_args_list) + calls = [call(1, 2), call(a=3)] + self.assertIn(calls, mock.call_args_list) + calls = [call(3, 4), call(b=6)] + self.assertIn(calls, mock.call_args_list) + calls = [call(3, 4)] + self.assertIn(calls, mock.call_args_list) + + self.assertNotIn(call('fish'), mock.call_args_list) + self.assertNotIn([call('fish')], mock.call_args_list) + + + def test_call_list_str(self): + mock = Mock() + mock(1, 2) + mock.foo(a=3) + mock.foo.bar().baz('fish', cat='dog') + + expected = ( + "[call(1, 2),\n" + " call.foo(a=3),\n" + " call.foo.bar(),\n" + " call.foo.bar().baz('fish', cat='dog')]" + ) + self.assertEqual(str(mock.mock_calls), expected) + + + @unittest.skipIf(six.PY3, "Unicode is properly handled with Python 3") + def test_call_list_unicode(self): + # See github issue #328 + mock = Mock() + + class NonAsciiRepr(object): + def __repr__(self): + return "\xe9" + + mock(**{unicode("a"): NonAsciiRepr()}) + + self.assertEqual(str(mock.mock_calls), "[call(a=\xe9)]") + + + def test_propertymock(self): + p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock) + mock = p.start() + try: + SomeClass.one + mock.assert_called_once_with() + + s = SomeClass() + s.one + mock.assert_called_with() + self.assertEqual(mock.mock_calls, [call(), call()]) + + s.one = 3 + self.assertEqual(mock.mock_calls, [call(), call(), call(3)]) + finally: + p.stop() + + + def test_propertymock_returnvalue(self): + m = MagicMock() + p = PropertyMock() + type(m).foo = p + + returned = m.foo + p.assert_called_once_with() + self.assertIsInstance(returned, MagicMock) + self.assertNotIsInstance(returned, PropertyMock) + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/testmagicmethods.py b/ext/mock/tests/testmagicmethods.py new file mode 100644 index 0000000000..f47a20256f --- /dev/null +++ b/ext/mock/tests/testmagicmethods.py @@ -0,0 +1,533 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +from __future__ import division + +try: + unicode +except NameError: + # Python 3 + unicode = str + long = int + +import inspect +import sys +import textwrap + +import six +import unittest2 as unittest + +from mock import Mock, MagicMock +from mock.mock import _magics + + + +class TestMockingMagicMethods(unittest.TestCase): + + def test_deleting_magic_methods(self): + mock = Mock() + self.assertFalse(hasattr(mock, '__getitem__')) + + mock.__getitem__ = Mock() + self.assertTrue(hasattr(mock, '__getitem__')) + + del mock.__getitem__ + self.assertFalse(hasattr(mock, '__getitem__')) + + + def test_magicmock_del(self): + mock = MagicMock() + # before using getitem + del mock.__getitem__ + self.assertRaises(TypeError, lambda: mock['foo']) + + mock = MagicMock() + # this time use it first + mock['foo'] + del mock.__getitem__ + self.assertRaises(TypeError, lambda: mock['foo']) + + + def test_magic_method_wrapping(self): + mock = Mock() + def f(self, name): + return self, 'fish' + + 
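+        # Plain Mock leaves magic methods unconfigured; attaching a function
+        # (as below) wraps it so that the mock instance is passed in as
+        # 'self'.  MagicMock preconfigures them instead -- a quick sketch:
+        #
+        #     magic = MagicMock()
+        #     magic.__getitem__.return_value = 'fish'
+        #     assert magic['anything'] == 'fish'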
        mock.__getitem__ = f
+        self.assertIsNot(mock.__getitem__, f)
+        self.assertEqual(mock['foo'], (mock, 'fish'))
+        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
+
+        mock.__getitem__ = mock
+        self.assertIs(mock.__getitem__, mock)
+
+
+    def test_magic_methods_isolated_between_mocks(self):
+        mock1 = Mock()
+        mock2 = Mock()
+
+        mock1.__iter__ = Mock(return_value=iter([]))
+        self.assertEqual(list(mock1), [])
+        self.assertRaises(TypeError, lambda: list(mock2))
+
+
+    def test_repr(self):
+        mock = Mock()
+        self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
+        mock.__repr__ = lambda s: 'foo'
+        self.assertEqual(repr(mock), 'foo')
+
+
+    def test_str(self):
+        mock = Mock()
+        self.assertEqual(str(mock), object.__str__(mock))
+        mock.__str__ = lambda s: 'foo'
+        self.assertEqual(str(mock), 'foo')
+
+
+    @unittest.skipIf(six.PY3, "no unicode in Python 3")
+    def test_unicode(self):
+        mock = Mock()
+        self.assertEqual(unicode(mock), unicode(str(mock)))
+
+        mock.__unicode__ = lambda s: unicode('foo')
+        self.assertEqual(unicode(mock), unicode('foo'))
+
+
+    def test_dict_methods(self):
+        mock = Mock()
+
+        self.assertRaises(TypeError, lambda: mock['foo'])
+        def _del():
+            del mock['foo']
+        def _set():
+            mock['foo'] = 3
+        self.assertRaises(TypeError, _del)
+        self.assertRaises(TypeError, _set)
+
+        _dict = {}
+        def getitem(s, name):
+            return _dict[name]
+        def setitem(s, name, value):
+            _dict[name] = value
+        def delitem(s, name):
+            del _dict[name]
+
+        mock.__setitem__ = setitem
+        mock.__getitem__ = getitem
+        mock.__delitem__ = delitem
+
+        self.assertRaises(KeyError, lambda: mock['foo'])
+        mock['foo'] = 'bar'
+        self.assertEqual(_dict, {'foo': 'bar'})
+        self.assertEqual(mock['foo'], 'bar')
+        del mock['foo']
+        self.assertEqual(_dict, {})
+
+
+    def test_numeric(self):
+        original = mock = Mock()
+        mock.value = 0
+
+        self.assertRaises(TypeError, lambda: mock + 3)
+
+        def add(self, other):
+            mock.value += other
+            return self
+        mock.__add__ = add
+        self.assertEqual(mock + 3, mock)
+        self.assertEqual(mock.value, 3)
+
+        del mock.__add__
+        def iadd(mock):
+            mock += 3
+        self.assertRaises(TypeError, iadd, mock)
+        mock.__iadd__ = add
+        mock += 6
+        self.assertEqual(mock, original)
+        self.assertEqual(mock.value, 9)
+
+        self.assertRaises(TypeError, lambda: 3 + mock)
+        mock.__radd__ = add
+        self.assertEqual(7 + mock, mock)
+        self.assertEqual(mock.value, 16)
+
+    def test_division(self):
+        original = mock = Mock()
+        mock.value = 32
+        self.assertRaises(TypeError, lambda: mock / 2)
+
+        def truediv(self, other):
+            mock.value /= other
+            return self
+        mock.__truediv__ = truediv
+        self.assertEqual(mock / 2, mock)
+        self.assertEqual(mock.value, 16)
+
+        del mock.__truediv__
+        if six.PY3:
+            def itruediv(mock):
+                mock /= 4
+            self.assertRaises(TypeError, itruediv, mock)
+            mock.__itruediv__ = truediv
+            mock /= 8
+            self.assertEqual(mock, original)
+            self.assertEqual(mock.value, 2)
+        else:
+            mock.value = 2
+
+        self.assertRaises(TypeError, lambda: 8 / mock)
+        mock.__rtruediv__ = truediv
+        self.assertEqual(0.5 / mock, mock)
+        self.assertEqual(mock.value, 4)
+
+    def test_hash(self):
+        mock = Mock()
+        # test delegation
+        self.assertEqual(hash(mock), Mock.__hash__(mock))
+
+        def _hash(s):
+            return 3
+        mock.__hash__ = _hash
+        self.assertEqual(hash(mock), 3)
+
+
+    def test_nonzero(self):
+        m = Mock()
+        self.assertTrue(bool(m))
+
+        nonzero = lambda s: False
+        if six.PY2:
+            m.__nonzero__ = nonzero
+        else:
+            m.__bool__ = nonzero
+
+        self.assertFalse(bool(m))
+
+
+    def test_comparison(self):
+        mock = Mock()
+        def comp(s, o):
+            return True
+        mock.__lt__ =
mock.__gt__ = mock.__le__ = mock.__ge__ = comp + self. assertTrue(mock < 3) + self. assertTrue(mock > 3) + self. assertTrue(mock <= 3) + self. assertTrue(mock >= 3) + + if six.PY2: + # incomparable in Python 3 + self.assertEqual(Mock() < 3, object() < 3) + self.assertEqual(Mock() > 3, object() > 3) + self.assertEqual(Mock() <= 3, object() <= 3) + self.assertEqual(Mock() >= 3, object() >= 3) + else: + self.assertRaises(TypeError, lambda: MagicMock() < object()) + self.assertRaises(TypeError, lambda: object() < MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() < MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() > object()) + self.assertRaises(TypeError, lambda: object() > MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() > MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() <= object()) + self.assertRaises(TypeError, lambda: object() <= MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() >= object()) + self.assertRaises(TypeError, lambda: object() >= MagicMock()) + self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock()) + + + def test_equality(self): + for mock in Mock(), MagicMock(): + self.assertEqual(mock == mock, True) + self.assertIsInstance(mock == mock, bool) + self.assertEqual(mock != mock, False) + self.assertIsInstance(mock != mock, bool) + self.assertEqual(mock == object(), False) + self.assertEqual(mock != object(), True) + + def eq(self, other): + return other == 3 + mock.__eq__ = eq + self.assertTrue(mock == 3) + self.assertFalse(mock == 4) + + def ne(self, other): + return other == 3 + mock.__ne__ = ne + self.assertTrue(mock != 3) + self.assertFalse(mock != 4) + + mock = MagicMock() + mock.__eq__.return_value = True + self.assertIsInstance(mock == 3, bool) + self.assertEqual(mock == 3, True) + + mock.__ne__.return_value = False + self.assertIsInstance(mock != 3, bool) + self.assertEqual(mock != 3, False) + + + def test_len_contains_iter(self): + mock = Mock() + + self.assertRaises(TypeError, len, mock) + self.assertRaises(TypeError, iter, mock) + self.assertRaises(TypeError, lambda: 'foo' in mock) + + mock.__len__ = lambda s: 6 + self.assertEqual(len(mock), 6) + + mock.__contains__ = lambda s, o: o == 3 + self.assertIn(3, mock) + self.assertNotIn(6, mock) + + mock.__iter__ = lambda s: iter('foobarbaz') + self.assertEqual(list(mock), list('foobarbaz')) + + + def test_magicmock(self): + mock = MagicMock() + + mock.__iter__.return_value = iter([1, 2, 3]) + self.assertEqual(list(mock), [1, 2, 3]) + + name = '__nonzero__' + other = '__bool__' + if six.PY3: + name, other = other, name + getattr(mock, name).return_value = False + self.assertFalse(hasattr(mock, other)) + self.assertFalse(bool(mock)) + + for entry in _magics: + self.assertTrue(hasattr(mock, entry)) + self.assertFalse(hasattr(mock, '__imaginery__')) + + + def test_magic_mock_equality(self): + mock = MagicMock() + self.assertIsInstance(mock == object(), bool) + self.assertIsInstance(mock != object(), bool) + + self.assertEqual(mock == object(), False) + self.assertEqual(mock != object(), True) + self.assertEqual(mock == mock, True) + self.assertEqual(mock != mock, False) + + + def test_magicmock_defaults(self): + mock = MagicMock() + self.assertEqual(int(mock), 1) + self.assertEqual(complex(mock), 1j) + self.assertEqual(float(mock), 1.0) + self.assertEqual(long(mock), long(1)) + self.assertNotIn(object(), mock) + self.assertEqual(len(mock), 0) + self.assertEqual(list(mock), []) + 
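+        # The defaults exercised here are ordinary return values, so any of
+        # them can be overridden per instance, e.g.:
+        #
+        #     m = MagicMock()
+        #     m.__len__.return_value = 6
+        #     assert len(m) == 6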
self.assertEqual(hash(mock), object.__hash__(mock)) + self.assertEqual(str(mock), object.__str__(mock)) + self.assertEqual(unicode(mock), object.__str__(mock)) + self.assertIsInstance(unicode(mock), unicode) + self.assertTrue(bool(mock)) + if six.PY2: + self.assertEqual(oct(mock), '1') + else: + # in Python 3 oct and hex use __index__ + # so these tests are for __index__ in py3k + self.assertEqual(oct(mock), '0o1') + self.assertEqual(hex(mock), '0x1') + # how to test __sizeof__ ? + + + @unittest.skipIf(six.PY3, "no __cmp__ in Python 3") + def test_non_default_magic_methods(self): + mock = MagicMock() + self.assertRaises(AttributeError, lambda: mock.__cmp__) + + mock = Mock() + mock.__cmp__ = lambda s, o: 0 + + self.assertEqual(mock, object()) + + + def test_magic_methods_and_spec(self): + class Iterable(object): + def __iter__(self): + pass + + mock = Mock(spec=Iterable) + self.assertRaises(AttributeError, lambda: mock.__iter__) + + mock.__iter__ = Mock(return_value=iter([])) + self.assertEqual(list(mock), []) + + class NonIterable(object): + pass + mock = Mock(spec=NonIterable) + self.assertRaises(AttributeError, lambda: mock.__iter__) + + def set_int(): + mock.__int__ = Mock(return_value=iter([])) + self.assertRaises(AttributeError, set_int) + + mock = MagicMock(spec=Iterable) + self.assertEqual(list(mock), []) + self.assertRaises(AttributeError, set_int) + + + def test_magic_methods_and_spec_set(self): + class Iterable(object): + def __iter__(self): + pass + + mock = Mock(spec_set=Iterable) + self.assertRaises(AttributeError, lambda: mock.__iter__) + + mock.__iter__ = Mock(return_value=iter([])) + self.assertEqual(list(mock), []) + + class NonIterable(object): + pass + mock = Mock(spec_set=NonIterable) + self.assertRaises(AttributeError, lambda: mock.__iter__) + + def set_int(): + mock.__int__ = Mock(return_value=iter([])) + self.assertRaises(AttributeError, set_int) + + mock = MagicMock(spec_set=Iterable) + self.assertEqual(list(mock), []) + self.assertRaises(AttributeError, set_int) + + + def test_setting_unsupported_magic_method(self): + mock = MagicMock() + def set_setattr(): + mock.__setattr__ = lambda self, name: None + self.assertRaisesRegex(AttributeError, + "Attempting to set unsupported magic method '__setattr__'.", + set_setattr + ) + + + def test_attributes_and_return_value(self): + mock = MagicMock() + attr = mock.foo + def _get_type(obj): + # the type of every mock (or magicmock) is a custom subclass + # so the real type is the second in the mro + return type(obj).__mro__[1] + self.assertEqual(_get_type(attr), MagicMock) + + returned = mock() + self.assertEqual(_get_type(returned), MagicMock) + + + def test_magic_methods_are_magic_mocks(self): + mock = MagicMock() + self.assertIsInstance(mock.__getitem__, MagicMock) + + mock[1][2].__getitem__.return_value = 3 + self.assertEqual(mock[1][2][3], 3) + + + def test_magic_method_reset_mock(self): + mock = MagicMock() + str(mock) + self.assertTrue(mock.__str__.called) + mock.reset_mock() + self.assertFalse(mock.__str__.called) + + def test_dir(self): + # overriding the default implementation + for mock in Mock(), MagicMock(): + def _dir(self): + return ['foo'] + mock.__dir__ = _dir + self.assertEqual(dir(mock), ['foo']) + + + @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy") + def test_bound_methods(self): + m = Mock() + + # XXXX should this be an expected failure instead? + + # this seems like it should work, but is hard to do without introducing + # other api inconsistencies. 
Failure message could be better though. + m.__iter__ = [3].__iter__ + self.assertRaises(TypeError, iter, m) + + + def test_magic_method_type(self): + class Foo(MagicMock): + pass + + foo = Foo() + self.assertIsInstance(foo.__int__, Foo) + + + def test_descriptor_from_class(self): + m = MagicMock() + type(m).__str__.return_value = 'foo' + self.assertEqual(str(m), 'foo') + + + def test_iterable_as_iter_return_value(self): + m = MagicMock() + m.__iter__.return_value = [1, 2, 3] + self.assertEqual(list(m), [1, 2, 3]) + self.assertEqual(list(m), [1, 2, 3]) + + m.__iter__.return_value = iter([4, 5, 6]) + self.assertEqual(list(m), [4, 5, 6]) + self.assertEqual(list(m), []) + + @unittest.skipIf(sys.version_info < (3, 5), "@ added in Python 3.5") + def test_matmul(self): + src = textwrap.dedent("""\ + m = MagicMock() + self.assertIsInstance(m @ 1, MagicMock) + m.__matmul__.return_value = 42 + m.__rmatmul__.return_value = 666 + m.__imatmul__.return_value = 24 + self.assertEqual(m @ 1, 42) + self.assertEqual(1 @ m, 666) + m @= 24 + self.assertEqual(m, 24) + """) + exec(src) + + def test_divmod_and_rdivmod(self): + m = MagicMock() + self.assertIsInstance(divmod(5, m), MagicMock) + m.__divmod__.return_value = (2, 1) + self.assertEqual(divmod(m, 2), (2, 1)) + m = MagicMock() + foo = divmod(2, m) + self.assertIsInstance(foo, MagicMock) + foo_direct = m.__divmod__(2) + self.assertIsInstance(foo_direct, MagicMock) + bar = divmod(m, 2) + self.assertIsInstance(bar, MagicMock) + bar_direct = m.__rdivmod__(2) + self.assertIsInstance(bar_direct, MagicMock) + + # http://bugs.python.org/issue23310 + # Check if you can change behaviour of magic methds in MagicMock init + def test_magic_in_initialization(self): + m = MagicMock(**{'__str__.return_value': "12"}) + self.assertEqual(str(m), "12") + + def test_changing_magic_set_in_initialization(self): + m = MagicMock(**{'__str__.return_value': "12"}) + m.__str__.return_value = "13" + self.assertEqual(str(m), "13") + m = MagicMock(**{'__str__.return_value': "12"}) + m.configure_mock(**{'__str__.return_value': "14"}) + self.assertEqual(str(m), "14") + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/testmock.py b/ext/mock/tests/testmock.py new file mode 100644 index 0000000000..7511c23d75 --- /dev/null +++ b/ext/mock/tests/testmock.py @@ -0,0 +1,1593 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +import copy +import pickle +import sys +import tempfile + +import six +import unittest2 as unittest + +import mock +from mock import ( + call, DEFAULT, patch, sentinel, + MagicMock, Mock, NonCallableMock, + NonCallableMagicMock, + create_autospec +) +from mock.mock import _CallList +from mock.tests.support import ( + callable, is_instance, next +) + + +try: + unicode +except NameError: + unicode = str + + +class Iter(object): + def __init__(self): + self.thing = iter(['this', 'is', 'an', 'iter']) + + def __iter__(self): + return self + + def next(self): + return next(self.thing) + + __next__ = next + + +class Something(object): + def meth(self, a, b, c, d=None): + pass + + @classmethod + def cmeth(cls, a, b, c, d=None): + pass + + @staticmethod + def smeth(a, b, c, d=None): + pass + + +class Subclass(MagicMock): + pass + + +class Thing(object): + attribute = 6 + foo = 'bar' + + + +class MockTest(unittest.TestCase): + + def test_all(self): + # if __all__ is badly defined then import * will raise an error + # We have to exec it because you can't 
import * inside a method + # in Python 3 + exec("from mock import *") + + + def test_constructor(self): + mock = Mock() + + self.assertFalse(mock.called, "called not initialised correctly") + self.assertEqual(mock.call_count, 0, + "call_count not initialised correctly") + self.assertTrue(is_instance(mock.return_value, Mock), + "return_value not initialised correctly") + + self.assertEqual(mock.call_args, None, + "call_args not initialised correctly") + self.assertEqual(mock.call_args_list, [], + "call_args_list not initialised correctly") + self.assertEqual(mock.method_calls, [], + "method_calls not initialised correctly") + + # Can't use hasattr for this test as it always returns True on a mock + self.assertNotIn('_items', mock.__dict__, + "default mock should not have '_items' attribute") + + self.assertIsNone(mock._mock_parent, + "parent not initialised correctly") + self.assertIsNone(mock._mock_methods, + "methods not initialised correctly") + self.assertEqual(mock._mock_children, {}, + "children not initialised incorrectly") + + + def test_return_value_in_constructor(self): + mock = Mock(return_value=None) + self.assertIsNone(mock.return_value, + "return value in constructor not honoured") + + + def test_repr(self): + mock = Mock(name='foo') + self.assertIn('foo', repr(mock)) + self.assertIn("'%s'" % id(mock), repr(mock)) + + mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')] + for mock, name in mocks: + self.assertIn('%s.bar' % name, repr(mock.bar)) + self.assertIn('%s.foo()' % name, repr(mock.foo())) + self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing)) + self.assertIn('%s()' % name, repr(mock())) + self.assertIn('%s()()' % name, repr(mock()())) + self.assertIn('%s()().foo.bar.baz().bing' % name, + repr(mock()().foo.bar.baz().bing)) + + + def test_repr_with_spec(self): + class X(object): + pass + + mock = Mock(spec=X) + self.assertIn(" spec='X' ", repr(mock)) + + mock = Mock(spec=X()) + self.assertIn(" spec='X' ", repr(mock)) + + mock = Mock(spec_set=X) + self.assertIn(" spec_set='X' ", repr(mock)) + + mock = Mock(spec_set=X()) + self.assertIn(" spec_set='X' ", repr(mock)) + + mock = Mock(spec=X, name='foo') + self.assertIn(" spec='X' ", repr(mock)) + self.assertIn(" name='foo' ", repr(mock)) + + mock = Mock(name='foo') + self.assertNotIn("spec", repr(mock)) + + mock = Mock() + self.assertNotIn("spec", repr(mock)) + + mock = Mock(spec=['foo']) + self.assertNotIn("spec", repr(mock)) + + + def test_side_effect(self): + mock = Mock() + + def effect(*args, **kwargs): + raise SystemError('kablooie') + + mock.side_effect = effect + self.assertRaises(SystemError, mock, 1, 2, fish=3) + mock.assert_called_with(1, 2, fish=3) + + results = [1, 2, 3] + def effect(): + return results.pop() + mock.side_effect = effect + + self.assertEqual([mock(), mock(), mock()], [3, 2, 1], + "side effect not used correctly") + + mock = Mock(side_effect=sentinel.SideEffect) + self.assertEqual(mock.side_effect, sentinel.SideEffect, + "side effect in constructor not used") + + def side_effect(): + return DEFAULT + mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN) + self.assertEqual(mock(), sentinel.RETURN) + + def test_autospec_side_effect(self): + # Test for issue17826 + results = [1, 2, 3] + def effect(): + return results.pop() + def f(): + pass + + mock = create_autospec(f) + mock.side_effect = [1, 2, 3] + self.assertEqual([mock(), mock(), mock()], [1, 2, 3], + "side effect not used correctly in create_autospec") + # Test where side effect is a callable + results = [1, 2, 3] + mock 
= create_autospec(f) + mock.side_effect = effect + self.assertEqual([mock(), mock(), mock()], [3, 2, 1], + "callable side effect not used correctly") + + def test_autospec_side_effect_exception(self): + # Test for issue 23661 + def f(): + pass + + mock = create_autospec(f) + mock.side_effect = ValueError('Bazinga!') + self.assertRaisesRegex(ValueError, 'Bazinga!', mock) + + @unittest.skipUnless('java' in sys.platform, + 'This test only applies to Jython') + def test_java_exception_side_effect(self): + import java + mock = Mock(side_effect=java.lang.RuntimeException("Boom!")) + + # can't use assertRaises with java exceptions + try: + mock(1, 2, fish=3) + except java.lang.RuntimeException: + pass + else: + self.fail('java exception not raised') + mock.assert_called_with(1,2, fish=3) + + + def test_reset_mock(self): + parent = Mock() + spec = ["something"] + mock = Mock(name="child", parent=parent, spec=spec) + mock(sentinel.Something, something=sentinel.SomethingElse) + something = mock.something + mock.something() + mock.side_effect = sentinel.SideEffect + return_value = mock.return_value + return_value() + + mock.reset_mock() + + self.assertEqual(mock._mock_name, "child", + "name incorrectly reset") + self.assertEqual(mock._mock_parent, parent, + "parent incorrectly reset") + self.assertEqual(mock._mock_methods, spec, + "methods incorrectly reset") + + self.assertFalse(mock.called, "called not reset") + self.assertEqual(mock.call_count, 0, "call_count not reset") + self.assertEqual(mock.call_args, None, "call_args not reset") + self.assertEqual(mock.call_args_list, [], "call_args_list not reset") + self.assertEqual(mock.method_calls, [], + "method_calls not initialised correctly: %r != %r" % + (mock.method_calls, [])) + self.assertEqual(mock.mock_calls, []) + + self.assertEqual(mock.side_effect, sentinel.SideEffect, + "side_effect incorrectly reset") + self.assertEqual(mock.return_value, return_value, + "return_value incorrectly reset") + self.assertFalse(return_value.called, "return value mock not reset") + self.assertEqual(mock._mock_children, {'something': something}, + "children reset incorrectly") + self.assertEqual(mock.something, something, + "children incorrectly cleared") + self.assertFalse(mock.something.called, "child not reset") + + + def test_reset_mock_recursion(self): + mock = Mock() + mock.return_value = mock + + # used to cause recursion + mock.reset_mock() + + def test_reset_mock_on_mock_open_issue_18622(self): + a = mock.mock_open() + a.reset_mock() + + def test_call(self): + mock = Mock() + self.assertTrue(is_instance(mock.return_value, Mock), + "Default return_value should be a Mock") + + result = mock() + self.assertEqual(mock(), result, + "different result from consecutive calls") + mock.reset_mock() + + ret_val = mock(sentinel.Arg) + self.assertTrue(mock.called, "called not set") + self.assertEqual(mock.call_count, 1, "call_count incoreect") + self.assertEqual(mock.call_args, ((sentinel.Arg,), {}), + "call_args not set") + self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})], + "call_args_list not initialised correctly") + + mock.return_value = sentinel.ReturnValue + ret_val = mock(sentinel.Arg, key=sentinel.KeyArg) + self.assertEqual(ret_val, sentinel.ReturnValue, + "incorrect return value") + + self.assertEqual(mock.call_count, 2, "call_count incorrect") + self.assertEqual(mock.call_args, + ((sentinel.Arg,), {'key': sentinel.KeyArg}), + "call_args not set") + self.assertEqual(mock.call_args_list, [ + ((sentinel.Arg,), {}), + ((sentinel.Arg,), {'key': 
sentinel.KeyArg}) + ], + "call_args_list not set") + + + def test_call_args_comparison(self): + mock = Mock() + mock() + mock(sentinel.Arg) + mock(kw=sentinel.Kwarg) + mock(sentinel.Arg, kw=sentinel.Kwarg) + self.assertEqual(mock.call_args_list, [ + (), + ((sentinel.Arg,),), + ({"kw": sentinel.Kwarg},), + ((sentinel.Arg,), {"kw": sentinel.Kwarg}) + ]) + self.assertEqual(mock.call_args, + ((sentinel.Arg,), {"kw": sentinel.Kwarg})) + + # Comparing call_args to a long sequence should not raise + # an exception. See issue 24857. + self.assertFalse(mock.call_args == "a long sequence") + + def test_assert_called_with(self): + mock = Mock() + mock() + + # Will raise an exception if it fails + mock.assert_called_with() + self.assertRaises(AssertionError, mock.assert_called_with, 1) + + mock.reset_mock() + self.assertRaises(AssertionError, mock.assert_called_with) + + mock(1, 2, 3, a='fish', b='nothing') + mock.assert_called_with(1, 2, 3, a='fish', b='nothing') + + + def test_assert_called_with_function_spec(self): + def f(a, b, c, d=None): + pass + + mock = Mock(spec=f) + + mock(1, b=2, c=3) + mock.assert_called_with(1, 2, 3) + mock.assert_called_with(a=1, b=2, c=3) + self.assertRaises(AssertionError, mock.assert_called_with, + 1, b=3, c=2) + # Expected call doesn't match the spec's signature + with self.assertRaises(AssertionError) as cm: + mock.assert_called_with(e=8) + if hasattr(cm.exception, '__cause__'): + self.assertIsInstance(cm.exception.__cause__, TypeError) + + + def test_assert_called_with_method_spec(self): + def _check(mock): + mock(1, b=2, c=3) + mock.assert_called_with(1, 2, 3) + mock.assert_called_with(a=1, b=2, c=3) + self.assertRaises(AssertionError, mock.assert_called_with, + 1, b=3, c=2) + + mock = Mock(spec=Something().meth) + _check(mock) + mock = Mock(spec=Something.cmeth) + _check(mock) + mock = Mock(spec=Something().cmeth) + _check(mock) + mock = Mock(spec=Something.smeth) + _check(mock) + mock = Mock(spec=Something().smeth) + _check(mock) + + + def test_assert_called_once_with(self): + mock = Mock() + mock() + + # Will raise an exception if it fails + mock.assert_called_once_with() + + mock() + self.assertRaises(AssertionError, mock.assert_called_once_with) + + mock.reset_mock() + self.assertRaises(AssertionError, mock.assert_called_once_with) + + mock('foo', 'bar', baz=2) + mock.assert_called_once_with('foo', 'bar', baz=2) + + mock.reset_mock() + mock('foo', 'bar', baz=2) + self.assertRaises( + AssertionError, + lambda: mock.assert_called_once_with('bob', 'bar', baz=2) + ) + + + def test_assert_called_once_with_function_spec(self): + def f(a, b, c, d=None): + pass + + mock = Mock(spec=f) + + mock(1, b=2, c=3) + mock.assert_called_once_with(1, 2, 3) + mock.assert_called_once_with(a=1, b=2, c=3) + self.assertRaises(AssertionError, mock.assert_called_once_with, + 1, b=3, c=2) + # Expected call doesn't match the spec's signature + with self.assertRaises(AssertionError) as cm: + mock.assert_called_once_with(e=8) + if hasattr(cm.exception, '__cause__'): + self.assertIsInstance(cm.exception.__cause__, TypeError) + # Mock called more than once => always fails + mock(4, 5, 6) + self.assertRaises(AssertionError, mock.assert_called_once_with, + 1, 2, 3) + self.assertRaises(AssertionError, mock.assert_called_once_with, + 4, 5, 6) + + + def test_attribute_access_returns_mocks(self): + mock = Mock() + something = mock.something + self.assertTrue(is_instance(something, Mock), "attribute isn't a mock") + self.assertEqual(mock.something, something, + "different attributes returned for 
same name") + + # Usage example + mock = Mock() + mock.something.return_value = 3 + + self.assertEqual(mock.something(), 3, "method returned wrong value") + self.assertTrue(mock.something.called, + "method didn't record being called") + + + def test_attributes_have_name_and_parent_set(self): + mock = Mock() + something = mock.something + + self.assertEqual(something._mock_name, "something", + "attribute name not set correctly") + self.assertEqual(something._mock_parent, mock, + "attribute parent not set correctly") + + + def test_method_calls_recorded(self): + mock = Mock() + mock.something(3, fish=None) + mock.something_else.something(6, cake=sentinel.Cake) + + self.assertEqual(mock.something_else.method_calls, + [("something", (6,), {'cake': sentinel.Cake})], + "method calls not recorded correctly") + self.assertEqual(mock.method_calls, [ + ("something", (3,), {'fish': None}), + ("something_else.something", (6,), {'cake': sentinel.Cake}) + ], + "method calls not recorded correctly") + + + def test_method_calls_compare_easily(self): + mock = Mock() + mock.something() + self.assertEqual(mock.method_calls, [('something',)]) + self.assertEqual(mock.method_calls, [('something', (), {})]) + + mock = Mock() + mock.something('different') + self.assertEqual(mock.method_calls, [('something', ('different',))]) + self.assertEqual(mock.method_calls, + [('something', ('different',), {})]) + + mock = Mock() + mock.something(x=1) + self.assertEqual(mock.method_calls, [('something', {'x': 1})]) + self.assertEqual(mock.method_calls, [('something', (), {'x': 1})]) + + mock = Mock() + mock.something('different', some='more') + self.assertEqual(mock.method_calls, [ + ('something', ('different',), {'some': 'more'}) + ]) + + + def test_only_allowed_methods_exist(self): + for spec in ['something'], ('something',): + for arg in 'spec', 'spec_set': + mock = Mock(**{arg: spec}) + + # this should be allowed + mock.something + self.assertRaisesRegex( + AttributeError, + "Mock object has no attribute 'something_else'", + getattr, mock, 'something_else' + ) + + + def test_from_spec(self): + class Something(object): + x = 3 + __something__ = None + def y(self): + pass + + def test_attributes(mock): + # should work + mock.x + mock.y + mock.__something__ + self.assertRaisesRegex( + AttributeError, + "Mock object has no attribute 'z'", + getattr, mock, 'z' + ) + self.assertRaisesRegex( + AttributeError, + "Mock object has no attribute '__foobar__'", + getattr, mock, '__foobar__' + ) + + test_attributes(Mock(spec=Something)) + test_attributes(Mock(spec=Something())) + + + def test_wraps_calls(self): + real = Mock() + + mock = Mock(wraps=real) + self.assertEqual(mock(), real()) + + real.reset_mock() + + mock(1, 2, fish=3) + real.assert_called_with(1, 2, fish=3) + + + def test_wraps_call_with_nondefault_return_value(self): + real = Mock() + + mock = Mock(wraps=real) + mock.return_value = 3 + + self.assertEqual(mock(), 3) + self.assertFalse(real.called) + + + def test_wraps_attributes(self): + class Real(object): + attribute = Mock() + + real = Real() + + mock = Mock(wraps=real) + self.assertEqual(mock.attribute(), real.attribute()) + self.assertRaises(AttributeError, lambda: mock.fish) + + self.assertNotEqual(mock.attribute, real.attribute) + result = mock.attribute.frog(1, 2, fish=3) + Real.attribute.frog.assert_called_with(1, 2, fish=3) + self.assertEqual(result, Real.attribute.frog()) + + + def test_exceptional_side_effect(self): + mock = Mock(side_effect=AttributeError) + self.assertRaises(AttributeError, mock) + + mock 
= Mock(side_effect=AttributeError('foo')) + self.assertRaises(AttributeError, mock) + + + def test_baseexceptional_side_effect(self): + mock = Mock(side_effect=KeyboardInterrupt) + self.assertRaises(KeyboardInterrupt, mock) + + mock = Mock(side_effect=KeyboardInterrupt('foo')) + self.assertRaises(KeyboardInterrupt, mock) + + + def test_assert_called_with_message(self): + mock = Mock() + self.assertRaisesRegex(AssertionError, 'Not called', + mock.assert_called_with) + + + def test_assert_called_once_with_message(self): + mock = Mock(name='geoffrey') + self.assertRaisesRegex(AssertionError, + r"Expected 'geoffrey' to be called once\.", + mock.assert_called_once_with) + + + def test__name__(self): + mock = Mock() + self.assertRaises(AttributeError, lambda: mock.__name__) + + mock.__name__ = 'foo' + self.assertEqual(mock.__name__, 'foo') + + + def test_spec_list_subclass(self): + class Sub(list): + pass + mock = Mock(spec=Sub(['foo'])) + + mock.append(3) + mock.append.assert_called_with(3) + self.assertRaises(AttributeError, getattr, mock, 'foo') + + + def test_spec_class(self): + class X(object): + pass + + mock = Mock(spec=X) + self.assertIsInstance(mock, X) + + mock = Mock(spec=X()) + self.assertIsInstance(mock, X) + + self.assertIs(mock.__class__, X) + self.assertEqual(Mock().__class__.__name__, 'Mock') + + mock = Mock(spec_set=X) + self.assertIsInstance(mock, X) + + mock = Mock(spec_set=X()) + self.assertIsInstance(mock, X) + + + def test_setting_attribute_with_spec_set(self): + class X(object): + y = 3 + + mock = Mock(spec=X) + mock.x = 'foo' + + mock = Mock(spec_set=X) + def set_attr(): + mock.x = 'foo' + + mock.y = 'foo' + self.assertRaises(AttributeError, set_attr) + + + def test_copy(self): + current = sys.getrecursionlimit() + self.addCleanup(sys.setrecursionlimit, current) + + # can't use sys.maxint as this doesn't exist in Python 3 + sys.setrecursionlimit(int(10e8)) + # this segfaults without the fix in place + copy.copy(Mock()) + + + @unittest.skipIf(six.PY3, "no old style classes in Python 3") + def test_spec_old_style_classes(self): + class Foo: + bar = 7 + + mock = Mock(spec=Foo) + mock.bar = 6 + self.assertRaises(AttributeError, lambda: mock.foo) + + mock = Mock(spec=Foo()) + mock.bar = 6 + self.assertRaises(AttributeError, lambda: mock.foo) + + + @unittest.skipIf(six.PY3, "no old style classes in Python 3") + def test_spec_set_old_style_classes(self): + class Foo: + bar = 7 + + mock = Mock(spec_set=Foo) + mock.bar = 6 + self.assertRaises(AttributeError, lambda: mock.foo) + + def _set(): + mock.foo = 3 + self.assertRaises(AttributeError, _set) + + mock = Mock(spec_set=Foo()) + mock.bar = 6 + self.assertRaises(AttributeError, lambda: mock.foo) + + def _set(): + mock.foo = 3 + self.assertRaises(AttributeError, _set) + + + def test_subclass_with_properties(self): + class SubClass(Mock): + def _get(self): + return 3 + def _set(self, value): + raise NameError('strange error') + some_attribute = property(_get, _set) + + s = SubClass(spec_set=SubClass) + self.assertEqual(s.some_attribute, 3) + + def test(): + s.some_attribute = 3 + self.assertRaises(NameError, test) + + def test(): + s.foo = 'bar' + self.assertRaises(AttributeError, test) + + + def test_setting_call(self): + mock = Mock() + def __call__(self, a): + return self._mock_call(a) + + type(mock).__call__ = __call__ + mock('one') + mock.assert_called_with('one') + + self.assertRaises(TypeError, mock, 'one', 'two') + + + def test_dir(self): + mock = Mock() + attrs = set(dir(mock)) + type_attrs = set([m for m in dir(Mock) if 
not m.startswith('_')]) + + # all public attributes from the type are included + self.assertEqual(set(), type_attrs - attrs) + + # creates these attributes + mock.a, mock.b + self.assertIn('a', dir(mock)) + self.assertIn('b', dir(mock)) + + # instance attributes + mock.c = mock.d = None + self.assertIn('c', dir(mock)) + self.assertIn('d', dir(mock)) + + # magic methods + mock.__iter__ = lambda s: iter([]) + self.assertIn('__iter__', dir(mock)) + + + def test_dir_from_spec(self): + mock = Mock(spec=unittest.TestCase) + testcase_attrs = set(dir(unittest.TestCase)) + attrs = set(dir(mock)) + + # all attributes from the spec are included + self.assertEqual(set(), testcase_attrs - attrs) + + # shadow a sys attribute + mock.version = 3 + self.assertEqual(dir(mock).count('version'), 1) + + + def test_filter_dir(self): + patcher = patch.object(mock, 'FILTER_DIR', False) + patcher.start() + try: + attrs = set(dir(Mock())) + type_attrs = set(dir(Mock)) + + # ALL attributes from the type are included + self.assertEqual(set(), type_attrs - attrs) + finally: + patcher.stop() + + + def test_configure_mock(self): + mock = Mock(foo='bar') + self.assertEqual(mock.foo, 'bar') + + mock = MagicMock(foo='bar') + self.assertEqual(mock.foo, 'bar') + + kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33, + 'foo': MagicMock()} + mock = Mock(**kwargs) + self.assertRaises(KeyError, mock) + self.assertEqual(mock.foo.bar(), 33) + self.assertIsInstance(mock.foo, MagicMock) + + mock = Mock() + mock.configure_mock(**kwargs) + self.assertRaises(KeyError, mock) + self.assertEqual(mock.foo.bar(), 33) + self.assertIsInstance(mock.foo, MagicMock) + + + def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs): + # needed because assertRaisesRegex doesn't work easily with newlines + try: + func(*args, **kwargs) + except: + instance = sys.exc_info()[1] + self.assertIsInstance(instance, exception) + else: + self.fail('Exception %r not raised' % (exception,)) + + msg = str(instance) + self.assertEqual(msg, message) + + + def test_assert_called_with_failure_message(self): + mock = NonCallableMock() + + expected = "mock(1, '2', 3, bar='foo')" + message = 'Expected call: %s\nNot called' + self.assertRaisesWithMsg( + AssertionError, message % (expected,), + mock.assert_called_with, 1, '2', 3, bar='foo' + ) + + mock.foo(1, '2', 3, foo='foo') + + + asserters = [ + mock.foo.assert_called_with, mock.foo.assert_called_once_with + ] + for meth in asserters: + actual = "foo(1, '2', 3, foo='foo')" + expected = "foo(1, '2', 3, bar='foo')" + message = 'Expected call: %s\nActual call: %s' + self.assertRaisesWithMsg( + AssertionError, message % (expected, actual), + meth, 1, '2', 3, bar='foo' + ) + + # just kwargs + for meth in asserters: + actual = "foo(1, '2', 3, foo='foo')" + expected = "foo(bar='foo')" + message = 'Expected call: %s\nActual call: %s' + self.assertRaisesWithMsg( + AssertionError, message % (expected, actual), + meth, bar='foo' + ) + + # just args + for meth in asserters: + actual = "foo(1, '2', 3, foo='foo')" + expected = "foo(1, 2, 3)" + message = 'Expected call: %s\nActual call: %s' + self.assertRaisesWithMsg( + AssertionError, message % (expected, actual), + meth, 1, 2, 3 + ) + + # empty + for meth in asserters: + actual = "foo(1, '2', 3, foo='foo')" + expected = "foo()" + message = 'Expected call: %s\nActual call: %s' + self.assertRaisesWithMsg( + AssertionError, message % (expected, actual), meth + ) + + + def test_mock_calls(self): + mock = MagicMock() + + # need to do this because 
MagicMock.mock_calls used to just return + # a MagicMock which also returned a MagicMock when __eq__ was called + self.assertIs(mock.mock_calls == [], True) + + mock = MagicMock() + mock() + expected = [('', (), {})] + self.assertEqual(mock.mock_calls, expected) + + mock.foo() + expected.append(call.foo()) + self.assertEqual(mock.mock_calls, expected) + # intermediate mock_calls work too + self.assertEqual(mock.foo.mock_calls, [('', (), {})]) + + mock = MagicMock() + mock().foo(1, 2, 3, a=4, b=5) + expected = [ + ('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5)) + ] + self.assertEqual(mock.mock_calls, expected) + self.assertEqual(mock.return_value.foo.mock_calls, + [('', (1, 2, 3), dict(a=4, b=5))]) + self.assertEqual(mock.return_value.mock_calls, + [('foo', (1, 2, 3), dict(a=4, b=5))]) + + mock = MagicMock() + mock().foo.bar().baz() + expected = [ + ('', (), {}), ('().foo.bar', (), {}), + ('().foo.bar().baz', (), {}) + ] + self.assertEqual(mock.mock_calls, expected) + self.assertEqual(mock().mock_calls, + call.foo.bar().baz().call_list()) + + for kwargs in dict(), dict(name='bar'): + mock = MagicMock(**kwargs) + int(mock.foo) + expected = [('foo.__int__', (), {})] + self.assertEqual(mock.mock_calls, expected) + + mock = MagicMock(**kwargs) + mock.a()() + expected = [('a', (), {}), ('a()', (), {})] + self.assertEqual(mock.mock_calls, expected) + self.assertEqual(mock.a().mock_calls, [call()]) + + mock = MagicMock(**kwargs) + mock(1)(2)(3) + self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list()) + self.assertEqual(mock().mock_calls, call(2)(3).call_list()) + self.assertEqual(mock()().mock_calls, call(3).call_list()) + + mock = MagicMock(**kwargs) + mock(1)(2)(3).a.b.c(4) + self.assertEqual(mock.mock_calls, + call(1)(2)(3).a.b.c(4).call_list()) + self.assertEqual(mock().mock_calls, + call(2)(3).a.b.c(4).call_list()) + self.assertEqual(mock()().mock_calls, + call(3).a.b.c(4).call_list()) + + mock = MagicMock(**kwargs) + int(mock().foo.bar().baz()) + last_call = ('().foo.bar().baz().__int__', (), {}) + self.assertEqual(mock.mock_calls[-1], last_call) + self.assertEqual(mock().mock_calls, + call.foo.bar().baz().__int__().call_list()) + self.assertEqual(mock().foo.bar().mock_calls, + call.baz().__int__().call_list()) + self.assertEqual(mock().foo.bar().baz.mock_calls, + call().__int__().call_list()) + + + def test_subclassing(self): + class Subclass(Mock): + pass + + mock = Subclass() + self.assertIsInstance(mock.foo, Subclass) + self.assertIsInstance(mock(), Subclass) + + class Subclass(Mock): + def _get_child_mock(self, **kwargs): + return Mock(**kwargs) + + mock = Subclass() + self.assertNotIsInstance(mock.foo, Subclass) + self.assertNotIsInstance(mock(), Subclass) + + + def test_arg_lists(self): + mocks = [ + Mock(), + MagicMock(), + NonCallableMock(), + NonCallableMagicMock() + ] + + def assert_attrs(mock): + names = 'call_args_list', 'method_calls', 'mock_calls' + for name in names: + attr = getattr(mock, name) + self.assertIsInstance(attr, _CallList) + self.assertIsInstance(attr, list) + self.assertEqual(attr, []) + + for mock in mocks: + assert_attrs(mock) + + if callable(mock): + mock() + mock(1, 2) + mock(a=3) + + mock.reset_mock() + assert_attrs(mock) + + mock.foo() + mock.foo.bar(1, a=3) + mock.foo(1).bar().baz(3) + + mock.reset_mock() + assert_attrs(mock) + + + def test_call_args_two_tuple(self): + mock = Mock() + mock(1, a=3) + mock(2, b=4) + + self.assertEqual(len(mock.call_args), 2) + args, kwargs = mock.call_args + self.assertEqual(args, (2,)) + self.assertEqual(kwargs, 
dict(b=4)) + + expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))] + for expected, call_args in zip(expected_list, mock.call_args_list): + self.assertEqual(len(call_args), 2) + self.assertEqual(expected[0], call_args[0]) + self.assertEqual(expected[1], call_args[1]) + + + def test_side_effect_iterator(self): + mock = Mock(side_effect=iter([1, 2, 3])) + self.assertEqual([mock(), mock(), mock()], [1, 2, 3]) + self.assertRaises(StopIteration, mock) + + mock = MagicMock(side_effect=['a', 'b', 'c']) + self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c']) + self.assertRaises(StopIteration, mock) + + mock = Mock(side_effect='ghi') + self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i']) + self.assertRaises(StopIteration, mock) + + class Foo(object): + pass + mock = MagicMock(side_effect=Foo) + self.assertIsInstance(mock(), Foo) + + mock = Mock(side_effect=Iter()) + self.assertEqual([mock(), mock(), mock(), mock()], + ['this', 'is', 'an', 'iter']) + self.assertRaises(StopIteration, mock) + + + def test_side_effect_setting_iterator(self): + mock = Mock() + mock.side_effect = iter([1, 2, 3]) + self.assertEqual([mock(), mock(), mock()], [1, 2, 3]) + self.assertRaises(StopIteration, mock) + side_effect = mock.side_effect + self.assertIsInstance(side_effect, type(iter([]))) + + mock.side_effect = ['a', 'b', 'c'] + self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c']) + self.assertRaises(StopIteration, mock) + side_effect = mock.side_effect + self.assertIsInstance(side_effect, type(iter([]))) + + this_iter = Iter() + mock.side_effect = this_iter + self.assertEqual([mock(), mock(), mock(), mock()], + ['this', 'is', 'an', 'iter']) + self.assertRaises(StopIteration, mock) + self.assertIs(mock.side_effect, this_iter) + + + def test_side_effect_iterator_exceptions(self): + for Klass in Mock, MagicMock: + iterable = (ValueError, 3, KeyError, 6) + m = Klass(side_effect=iterable) + self.assertRaises(ValueError, m) + self.assertEqual(m(), 3) + self.assertRaises(KeyError, m) + self.assertEqual(m(), 6) + + + def test_side_effect_iterator_default(self): + mock = Mock(return_value=2) + mock.side_effect = iter([1, DEFAULT]) + self.assertEqual([mock(), mock()], [1, 2]) + + def test_assert_has_calls_any_order(self): + mock = Mock() + mock(1, 2) + mock(a=3) + mock(3, 4) + mock(b=6) + mock(b=6) + + kalls = [ + call(1, 2), ({'a': 3},), + ((3, 4),), ((), {'a': 3}), + ('', (1, 2)), ('', {'a': 3}), + ('', (1, 2), {}), ('', (), {'a': 3}) + ] + for kall in kalls: + mock.assert_has_calls([kall], any_order=True) + + for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo': + self.assertRaises( + AssertionError, mock.assert_has_calls, + [kall], any_order=True + ) + + kall_lists = [ + [call(1, 2), call(b=6)], + [call(3, 4), call(1, 2)], + [call(b=6), call(b=6)], + ] + + for kall_list in kall_lists: + mock.assert_has_calls(kall_list, any_order=True) + + kall_lists = [ + [call(b=6), call(b=6), call(b=6)], + [call(1, 2), call(1, 2)], + [call(3, 4), call(1, 2), call(5, 7)], + [call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)], + ] + for kall_list in kall_lists: + self.assertRaises( + AssertionError, mock.assert_has_calls, + kall_list, any_order=True + ) + + def test_assert_has_calls(self): + kalls1 = [ + call(1, 2), ({'a': 3},), + ((3, 4),), call(b=6), + ('', (1,), {'b': 6}), + ] + kalls2 = [call.foo(), call.bar(1)] + kalls2.extend(call.spam().baz(a=3).call_list()) + kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list()) + + mocks = [] + for mock in Mock(), MagicMock(): + mock(1, 2) + mock(a=3) + 
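# these calls line up one-to-one with kalls1; any contiguous slice of + # them must satisfy assert_has_calls in the loop below +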
mock(3, 4) + mock(b=6) + mock(1, b=6) + mocks.append((mock, kalls1)) + + mock = Mock() + mock.foo() + mock.bar(1) + mock.spam().baz(a=3) + mock.bam(set(), foo={}).fish([1]) + mocks.append((mock, kalls2)) + + for mock, kalls in mocks: + for i in range(len(kalls)): + for step in 1, 2, 3: + these = kalls[i:i+step] + mock.assert_has_calls(these) + + if len(these) > 1: + self.assertRaises( + AssertionError, + mock.assert_has_calls, + list(reversed(these)) + ) + + + def test_assert_has_calls_with_function_spec(self): + def f(a, b, c, d=None): + pass + + mock = Mock(spec=f) + + mock(1, b=2, c=3) + mock(4, 5, c=6, d=7) + mock(10, 11, c=12) + calls = [ + ('', (1, 2, 3), {}), + ('', (4, 5, 6), {'d': 7}), + ((10, 11, 12), {}), + ] + mock.assert_has_calls(calls) + mock.assert_has_calls(calls, any_order=True) + mock.assert_has_calls(calls[1:]) + mock.assert_has_calls(calls[1:], any_order=True) + mock.assert_has_calls(calls[:-1]) + mock.assert_has_calls(calls[:-1], any_order=True) + # Reversed order + calls = list(reversed(calls)) + with self.assertRaises(AssertionError): + mock.assert_has_calls(calls) + mock.assert_has_calls(calls, any_order=True) + with self.assertRaises(AssertionError): + mock.assert_has_calls(calls[1:]) + mock.assert_has_calls(calls[1:], any_order=True) + with self.assertRaises(AssertionError): + mock.assert_has_calls(calls[:-1]) + mock.assert_has_calls(calls[:-1], any_order=True) + + + def test_assert_any_call(self): + mock = Mock() + mock(1, 2) + mock(a=3) + mock(1, b=6) + + mock.assert_any_call(1, 2) + mock.assert_any_call(a=3) + mock.assert_any_call(1, b=6) + + self.assertRaises( + AssertionError, + mock.assert_any_call + ) + self.assertRaises( + AssertionError, + mock.assert_any_call, + 1, 3 + ) + self.assertRaises( + AssertionError, + mock.assert_any_call, + a=4 + ) + + + def test_assert_any_call_with_function_spec(self): + def f(a, b, c, d=None): + pass + + mock = Mock(spec=f) + + mock(1, b=2, c=3) + mock(4, 5, c=6, d=7) + mock.assert_any_call(1, 2, 3) + mock.assert_any_call(a=1, b=2, c=3) + mock.assert_any_call(4, 5, 6, 7) + mock.assert_any_call(a=4, b=5, c=6, d=7) + self.assertRaises(AssertionError, mock.assert_any_call, + 1, b=3, c=2) + # Expected call doesn't match the spec's signature + with self.assertRaises(AssertionError) as cm: + mock.assert_any_call(e=8) + if hasattr(cm.exception, '__cause__'): + self.assertIsInstance(cm.exception.__cause__, TypeError) + + + def test_mock_calls_create_autospec(self): + def f(a, b): + pass + obj = Iter() + obj.f = f + + funcs = [ + create_autospec(f), + create_autospec(obj).f + ] + for func in funcs: + func(1, 2) + func(3, 4) + + self.assertEqual( + func.mock_calls, [call(1, 2), call(3, 4)] + ) + + #Issue21222 + def test_create_autospec_with_name(self): + m = mock.create_autospec(object(), name='sweet_func') + self.assertIn('sweet_func', repr(m)) + + #Issue21238 + def test_mock_unsafe(self): + m = Mock() + with self.assertRaises(AttributeError): + m.assert_foo_call() + with self.assertRaises(AttributeError): + m.assret_foo_call() + m = Mock(unsafe=True) + m.assert_foo_call() + m.assret_foo_call() + + #Issue21262 + def test_assert_not_called(self): + m = Mock() + m.hello.assert_not_called() + m.hello() + with self.assertRaises(AssertionError): + m.hello.assert_not_called() + + def test_assert_called(self): + m = Mock() + with self.assertRaises(AssertionError): + m.hello.assert_called() + m.hello() + m.hello.assert_called() + + m.hello() + m.hello.assert_called() + + def test_assert_called_once(self): + m = Mock() + with 
self.assertRaises(AssertionError): + m.hello.assert_called_once() + m.hello() + m.hello.assert_called_once() + + m.hello() + with self.assertRaises(AssertionError): + m.hello.assert_called_once() + + #Issue21256 printout of keyword args should be in deterministic order + def test_sorted_call_signature(self): + m = Mock() + m.hello(name='hello', daddy='hero') + text = "call(daddy='hero', name='hello')" + self.assertEqual(repr(m.hello.call_args), text) + + #Issue21270 overrides tuple methods for mock.call objects + def test_override_tuple_methods(self): + c = call.count() + i = call.index(132,'hello') + m = Mock() + m.count() + m.index(132,"hello") + self.assertEqual(m.method_calls[0], c) + self.assertEqual(m.method_calls[1], i) + + def test_mock_add_spec(self): + class _One(object): + one = 1 + class _Two(object): + two = 2 + class Anything(object): + one = two = three = 'four' + + klasses = [ + Mock, MagicMock, NonCallableMock, NonCallableMagicMock + ] + for Klass in list(klasses): + klasses.append(lambda K=Klass: K(spec=Anything)) + klasses.append(lambda K=Klass: K(spec_set=Anything)) + + for Klass in klasses: + for kwargs in dict(), dict(spec_set=True): + mock = Klass() + #no error + mock.one, mock.two, mock.three + + for One, Two in [(_One, _Two), (['one'], ['two'])]: + for kwargs in dict(), dict(spec_set=True): + mock.mock_add_spec(One, **kwargs) + + mock.one + self.assertRaises( + AttributeError, getattr, mock, 'two' + ) + self.assertRaises( + AttributeError, getattr, mock, 'three' + ) + if 'spec_set' in kwargs: + self.assertRaises( + AttributeError, setattr, mock, 'three', None + ) + + mock.mock_add_spec(Two, **kwargs) + self.assertRaises( + AttributeError, getattr, mock, 'one' + ) + mock.two + self.assertRaises( + AttributeError, getattr, mock, 'three' + ) + if 'spec_set' in kwargs: + self.assertRaises( + AttributeError, setattr, mock, 'three', None + ) + # note that creating a mock, setting an instance attribute, and + # *then* setting a spec doesn't work. 
Not the intended use case + + + def test_mock_add_spec_magic_methods(self): + for Klass in MagicMock, NonCallableMagicMock: + mock = Klass() + int(mock) + + mock.mock_add_spec(object) + self.assertRaises(TypeError, int, mock) + + mock = Klass() + mock['foo'] + mock.__int__.return_value =4 + + mock.mock_add_spec(int) + self.assertEqual(int(mock), 4) + self.assertRaises(TypeError, lambda: mock['foo']) + + + def test_adding_child_mock(self): + for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock: + mock = Klass() + + mock.foo = Mock() + mock.foo() + + self.assertEqual(mock.method_calls, [call.foo()]) + self.assertEqual(mock.mock_calls, [call.foo()]) + + mock = Klass() + mock.bar = Mock(name='name') + mock.bar() + self.assertEqual(mock.method_calls, []) + self.assertEqual(mock.mock_calls, []) + + # mock with an existing _new_parent but no name + mock = Klass() + mock.baz = MagicMock()() + mock.baz() + self.assertEqual(mock.method_calls, []) + self.assertEqual(mock.mock_calls, []) + + + def test_adding_return_value_mock(self): + for Klass in Mock, MagicMock: + mock = Klass() + mock.return_value = MagicMock() + + mock()() + self.assertEqual(mock.mock_calls, [call(), call()()]) + + + def test_manager_mock(self): + class Foo(object): + one = 'one' + two = 'two' + manager = Mock() + p1 = patch.object(Foo, 'one') + p2 = patch.object(Foo, 'two') + + mock_one = p1.start() + self.addCleanup(p1.stop) + mock_two = p2.start() + self.addCleanup(p2.stop) + + manager.attach_mock(mock_one, 'one') + manager.attach_mock(mock_two, 'two') + + Foo.two() + Foo.one() + + self.assertEqual(manager.mock_calls, [call.two(), call.one()]) + + + def test_magic_methods_mock_calls(self): + for Klass in Mock, MagicMock: + m = Klass() + m.__int__ = Mock(return_value=3) + m.__float__ = MagicMock(return_value=3.0) + int(m) + float(m) + + self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()]) + self.assertEqual(m.method_calls, []) + + def test_mock_open_reuse_issue_21750(self): + mocked_open = mock.mock_open(read_data='data') + f1 = mocked_open('a-name') + f1_data = f1.read() + f2 = mocked_open('another-name') + f2_data = f2.read() + self.assertEqual(f1_data, f2_data) + + def test_mock_open_write(self): + # Test exception in file writing write() + mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV')) + with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp): + mock_filehandle = mock_namedtemp.return_value + mock_write = mock_filehandle.write + mock_write.side_effect = OSError('Test 2 Error') + def attempt(): + tempfile.NamedTemporaryFile().write('asd') + self.assertRaises(OSError, attempt) + + def test_mock_open_alter_readline(self): + mopen = mock.mock_open(read_data='foo\nbarn') + mopen.return_value.readline.side_effect = lambda *args:'abc' + first = mopen().readline() + second = mopen().readline() + self.assertEqual('abc', first) + self.assertEqual('abc', second) + + def test_mock_parents(self): + for Klass in Mock, MagicMock: + m = Klass() + original_repr = repr(m) + m.return_value = m + self.assertIs(m(), m) + self.assertEqual(repr(m), original_repr) + + m.reset_mock() + self.assertIs(m(), m) + self.assertEqual(repr(m), original_repr) + + m = Klass() + m.b = m.a + self.assertIn("name='mock.a'", repr(m.b)) + self.assertIn("name='mock.a'", repr(m.a)) + m.reset_mock() + self.assertIn("name='mock.a'", repr(m.b)) + self.assertIn("name='mock.a'", repr(m.a)) + + m = Klass() + original_repr = repr(m) + m.a = m() + m.a.return_value = m + + self.assertEqual(repr(m), original_repr) + 
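# m.a() returns m itself, so the child call's repr is still the + # parent's original repr +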
self.assertEqual(repr(m.a()), original_repr) + + + def test_attach_mock(self): + classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock + for Klass in classes: + for Klass2 in classes: + m = Klass() + + m2 = Klass2(name='foo') + m.attach_mock(m2, 'bar') + + self.assertIs(m.bar, m2) + self.assertIn("name='mock.bar'", repr(m2)) + + m.bar.baz(1) + self.assertEqual(m.mock_calls, [call.bar.baz(1)]) + self.assertEqual(m.method_calls, [call.bar.baz(1)]) + + + def test_attach_mock_return_value(self): + classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock + for Klass in Mock, MagicMock: + for Klass2 in classes: + m = Klass() + + m2 = Klass2(name='foo') + m.attach_mock(m2, 'return_value') + + self.assertIs(m(), m2) + self.assertIn("name='mock()'", repr(m2)) + + m2.foo() + self.assertEqual(m.mock_calls, call().foo().call_list()) + + + def test_attribute_deletion(self): + for mock in (Mock(), MagicMock(), NonCallableMagicMock(), + NonCallableMock()): + self.assertTrue(hasattr(mock, 'm')) + + del mock.m + self.assertFalse(hasattr(mock, 'm')) + + del mock.f + self.assertFalse(hasattr(mock, 'f')) + self.assertRaises(AttributeError, getattr, mock, 'f') + + + def test_class_assignable(self): + for mock in Mock(), MagicMock(): + self.assertNotIsInstance(mock, int) + + mock.__class__ = int + self.assertIsInstance(mock, int) + mock.foo + + + @unittest.expectedFailure + def test_pickle(self): + for Klass in (MagicMock, Mock, Subclass, NonCallableMagicMock): + mock = Klass(name='foo', attribute=3) + mock.foo(1, 2, 3) + data = pickle.dumps(mock) + new = pickle.loads(data) + + new.foo.assert_called_once_with(1, 2, 3) + self.assertFalse(new.called) + self.assertTrue(is_instance(new, Klass)) + self.assertIsInstance(new, Thing) + self.assertIn('name="foo"', repr(new)) + self.assertEqual(new.attribute, 3) + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/testpatch.py b/ext/mock/tests/testpatch.py new file mode 100644 index 0000000000..32a6c2712f --- /dev/null +++ b/ext/mock/tests/testpatch.py @@ -0,0 +1,1883 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +import os +import sys + +import six +import unittest2 as unittest + +from mock.tests import support +from mock.tests.support import SomeClass, is_instance, callable + +from mock import ( + NonCallableMock, CallableMixin, patch, sentinel, + MagicMock, Mock, NonCallableMagicMock, patch, + DEFAULT, call +) +from mock.mock import _patch, _get_target + +builtin_string = '__builtin__' +if six.PY3: + builtin_string = 'builtins' + unicode = str + +PTModule = sys.modules[__name__] +MODNAME = '%s.PTModule' % __name__ + + +def _get_proxy(obj, get_only=True): + class Proxy(object): + def __getattr__(self, name): + return getattr(obj, name) + if not get_only: + def __setattr__(self, name, value): + setattr(obj, name, value) + def __delattr__(self, name): + delattr(obj, name) + Proxy.__setattr__ = __setattr__ + Proxy.__delattr__ = __delattr__ + return Proxy() + + +# for use in the test +something = sentinel.Something +something_else = sentinel.SomethingElse + + +class Foo(object): + def __init__(self, a): + pass + def f(self, a): + pass + def g(self): + pass + foo = 'bar' + + class Bar(object): + def a(self): + pass + +foo_name = '%s.Foo' % __name__ + + +def function(a, b=Foo): + pass + + +class Container(object): + def __init__(self): + self.values = {} + + def __getitem__(self, name): + return self.values[name] + + def 
__setitem__(self, name, value): + self.values[name] = value + + def __delitem__(self, name): + del self.values[name] + + def __iter__(self): + return iter(self.values) + + + +class PatchTest(unittest.TestCase): + + def assertNotCallable(self, obj, magic=True): + MockClass = NonCallableMagicMock + if not magic: + MockClass = NonCallableMock + + self.assertRaises(TypeError, obj) + self.assertTrue(is_instance(obj, MockClass)) + self.assertFalse(is_instance(obj, CallableMixin)) + + + def test_single_patchobject(self): + class Something(object): + attribute = sentinel.Original + + @patch.object(Something, 'attribute', sentinel.Patched) + def test(): + self.assertEqual(Something.attribute, sentinel.Patched, "unpatched") + + test() + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + + + def test_patchobject_with_none(self): + class Something(object): + attribute = sentinel.Original + + @patch.object(Something, 'attribute', None) + def test(): + self.assertIsNone(Something.attribute, "unpatched") + + test() + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + + + def test_multiple_patchobject(self): + class Something(object): + attribute = sentinel.Original + next_attribute = sentinel.Original2 + + @patch.object(Something, 'attribute', sentinel.Patched) + @patch.object(Something, 'next_attribute', sentinel.Patched2) + def test(): + self.assertEqual(Something.attribute, sentinel.Patched, + "unpatched") + self.assertEqual(Something.next_attribute, sentinel.Patched2, + "unpatched") + + test() + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + self.assertEqual(Something.next_attribute, sentinel.Original2, + "patch not restored") + + + def test_object_lookup_is_quite_lazy(self): + global something + original = something + @patch('%s.something' % __name__, sentinel.Something2) + def test(): + pass + + try: + something = sentinel.replacement_value + test() + self.assertEqual(something, sentinel.replacement_value) + finally: + something = original + + + def test_patch(self): + @patch('%s.something' % __name__, sentinel.Something2) + def test(): + self.assertEqual(PTModule.something, sentinel.Something2, + "unpatched") + + test() + self.assertEqual(PTModule.something, sentinel.Something, + "patch not restored") + + @patch('%s.something' % __name__, sentinel.Something2) + @patch('%s.something_else' % __name__, sentinel.SomethingElse) + def test(): + self.assertEqual(PTModule.something, sentinel.Something2, + "unpatched") + self.assertEqual(PTModule.something_else, sentinel.SomethingElse, + "unpatched") + + self.assertEqual(PTModule.something, sentinel.Something, + "patch not restored") + self.assertEqual(PTModule.something_else, sentinel.SomethingElse, + "patch not restored") + + # Test the patching and restoring works a second time + test() + + self.assertEqual(PTModule.something, sentinel.Something, + "patch not restored") + self.assertEqual(PTModule.something_else, sentinel.SomethingElse, + "patch not restored") + + mock = Mock() + mock.return_value = sentinel.Handle + @patch('%s.open' % builtin_string, mock) + def test(): + self.assertEqual(open('filename', 'r'), sentinel.Handle, + "open not patched") + test() + test() + + self.assertNotEqual(open, mock, "patch not restored") + + + def test_patch_class_attribute(self): + @patch('%s.SomeClass.class_attribute' % __name__, + sentinel.ClassAttribute) + def test(): + self.assertEqual(PTModule.SomeClass.class_attribute, + sentinel.ClassAttribute, "unpatched") + 
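# the class attribute is only patched while the decorated function runs +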
test() + + self.assertIsNone(PTModule.SomeClass.class_attribute, + "patch not restored") + + + def test_patchobject_with_default_mock(self): + class Test(object): + something = sentinel.Original + something2 = sentinel.Original2 + + @patch.object(Test, 'something') + def test(mock): + self.assertEqual(mock, Test.something, + "Mock not passed into test function") + self.assertIsInstance(mock, MagicMock, + "patch with two arguments did not create a mock") + + test() + + @patch.object(Test, 'something') + @patch.object(Test, 'something2') + def test(this1, this2, mock1, mock2): + self.assertEqual(this1, sentinel.this1, + "Patched function didn't receive initial argument") + self.assertEqual(this2, sentinel.this2, + "Patched function didn't receive second argument") + self.assertEqual(mock1, Test.something2, + "Mock not passed into test function") + self.assertEqual(mock2, Test.something, + "Second Mock not passed into test function") + self.assertIsInstance(mock2, MagicMock, + "patch with two arguments did not create a mock") + self.assertIsInstance(mock2, MagicMock, + "patch with two arguments did not create a mock") + + # A hack to test that new mocks are passed the second time + self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1") + self.assertNotEqual(outerMock2, mock2, "unexpected value for mock1") + return mock1, mock2 + + outerMock1 = outerMock2 = None + outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2) + + # Test that executing a second time creates new mocks + test(sentinel.this1, sentinel.this2) + + + def test_patch_with_spec(self): + @patch('%s.SomeClass' % __name__, spec=SomeClass) + def test(MockSomeClass): + self.assertEqual(SomeClass, MockSomeClass) + self.assertTrue(is_instance(SomeClass.wibble, MagicMock)) + self.assertRaises(AttributeError, lambda: SomeClass.not_wibble) + + test() + + + def test_patchobject_with_spec(self): + @patch.object(SomeClass, 'class_attribute', spec=SomeClass) + def test(MockAttribute): + self.assertEqual(SomeClass.class_attribute, MockAttribute) + self.assertTrue(is_instance(SomeClass.class_attribute.wibble, + MagicMock)) + self.assertRaises(AttributeError, + lambda: SomeClass.class_attribute.not_wibble) + + test() + + + def test_patch_with_spec_as_list(self): + @patch('%s.SomeClass' % __name__, spec=['wibble']) + def test(MockSomeClass): + self.assertEqual(SomeClass, MockSomeClass) + self.assertTrue(is_instance(SomeClass.wibble, MagicMock)) + self.assertRaises(AttributeError, lambda: SomeClass.not_wibble) + + test() + + + def test_patchobject_with_spec_as_list(self): + @patch.object(SomeClass, 'class_attribute', spec=['wibble']) + def test(MockAttribute): + self.assertEqual(SomeClass.class_attribute, MockAttribute) + self.assertTrue(is_instance(SomeClass.class_attribute.wibble, + MagicMock)) + self.assertRaises(AttributeError, + lambda: SomeClass.class_attribute.not_wibble) + + test() + + + def test_nested_patch_with_spec_as_list(self): + # regression test for nested decorators + @patch('%s.open' % builtin_string) + @patch('%s.SomeClass' % __name__, spec=['wibble']) + def test(MockSomeClass, MockOpen): + self.assertEqual(SomeClass, MockSomeClass) + self.assertTrue(is_instance(SomeClass.wibble, MagicMock)) + self.assertRaises(AttributeError, lambda: SomeClass.not_wibble) + test() + + + def test_patch_with_spec_as_boolean(self): + @patch('%s.SomeClass' % __name__, spec=True) + def test(MockSomeClass): + self.assertEqual(SomeClass, MockSomeClass) + # Should not raise attribute error + MockSomeClass.wibble + + 
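# spec=True takes the spec from the object being replaced, so attributes + # missing from SomeClass raise AttributeError +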
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble) + + test() + + + def test_patch_object_with_spec_as_boolean(self): + @patch.object(PTModule, 'SomeClass', spec=True) + def test(MockSomeClass): + self.assertEqual(SomeClass, MockSomeClass) + # Should not raise attribute error + MockSomeClass.wibble + + self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble) + + test() + + + def test_patch_class_acts_with_spec_is_inherited(self): + @patch('%s.SomeClass' % __name__, spec=True) + def test(MockSomeClass): + self.assertTrue(is_instance(MockSomeClass, MagicMock)) + instance = MockSomeClass() + self.assertNotCallable(instance) + # Should not raise attribute error + instance.wibble + + self.assertRaises(AttributeError, lambda: instance.not_wibble) + + test() + + + def test_patch_with_create_mocks_non_existent_attributes(self): + @patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True) + def test(): + self.assertEqual(frooble, sentinel.Frooble) + + test() + self.assertRaises(NameError, lambda: frooble) + + + def test_patchobject_with_create_mocks_non_existent_attributes(self): + @patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True) + def test(): + self.assertEqual(SomeClass.frooble, sentinel.Frooble) + + test() + self.assertFalse(hasattr(SomeClass, 'frooble')) + + + def test_patch_wont_create_by_default(self): + try: + @patch('%s.frooble' % builtin_string, sentinel.Frooble) + def test(): + self.assertEqual(frooble, sentinel.Frooble) + + test() + except AttributeError: + pass + else: + self.fail('Patching non existent attributes should fail') + + self.assertRaises(NameError, lambda: frooble) + + + def test_patchobject_wont_create_by_default(self): + try: + @patch.object(SomeClass, 'ord', sentinel.Frooble) + def test(): + self.fail('Patching non existent attributes should fail') + + test() + except AttributeError: + pass + else: + self.fail('Patching non existent attributes should fail') + self.assertFalse(hasattr(SomeClass, 'ord')) + + + def test_patch_builtins_without_create(self): + @patch(__name__+'.ord') + def test_ord(mock_ord): + mock_ord.return_value = 101 + return ord('c') + + @patch(__name__+'.open') + def test_open(mock_open): + m = mock_open.return_value + m.read.return_value = 'abcd' + + fobj = open('doesnotexists.txt') + data = fobj.read() + fobj.close() + return data + + self.assertEqual(test_ord(), 101) + self.assertEqual(test_open(), 'abcd') + + + def test_patch_with_static_methods(self): + class Foo(object): + @staticmethod + def woot(): + return sentinel.Static + + @patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched)) + def anonymous(): + self.assertEqual(Foo.woot(), sentinel.Patched) + anonymous() + + self.assertEqual(Foo.woot(), sentinel.Static) + + + def test_patch_local(self): + foo = sentinel.Foo + @patch.object(sentinel, 'Foo', 'Foo') + def anonymous(): + self.assertEqual(sentinel.Foo, 'Foo') + anonymous() + + self.assertEqual(sentinel.Foo, foo) + + + def test_patch_slots(self): + class Foo(object): + __slots__ = ('Foo',) + + foo = Foo() + foo.Foo = sentinel.Foo + + @patch.object(foo, 'Foo', 'Foo') + def anonymous(): + self.assertEqual(foo.Foo, 'Foo') + anonymous() + + self.assertEqual(foo.Foo, sentinel.Foo) + + + def test_patchobject_class_decorator(self): + class Something(object): + attribute = sentinel.Original + + class Foo(object): + def test_method(other_self): + self.assertEqual(Something.attribute, sentinel.Patched, + "unpatched") + def not_test_method(other_self): + 
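# methods not matching patch.TEST_PREFIX are left unpatched +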
self.assertEqual(Something.attribute, sentinel.Original, + "non-test method patched") + + Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo) + + f = Foo() + f.test_method() + f.not_test_method() + + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + + + def test_patch_class_decorator(self): + class Something(object): + attribute = sentinel.Original + + class Foo(object): + def test_method(other_self, mock_something): + self.assertEqual(PTModule.something, mock_something, + "unpatched") + def not_test_method(other_self): + self.assertEqual(PTModule.something, sentinel.Something, + "non-test method patched") + Foo = patch('%s.something' % __name__)(Foo) + + f = Foo() + f.test_method() + f.not_test_method() + + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + self.assertEqual(PTModule.something, sentinel.Something, + "patch not restored") + + + def test_patchobject_twice(self): + class Something(object): + attribute = sentinel.Original + next_attribute = sentinel.Original2 + + @patch.object(Something, 'attribute', sentinel.Patched) + @patch.object(Something, 'attribute', sentinel.Patched) + def test(): + self.assertEqual(Something.attribute, sentinel.Patched, "unpatched") + + test() + + self.assertEqual(Something.attribute, sentinel.Original, + "patch not restored") + + + def test_patch_dict(self): + foo = {'initial': object(), 'other': 'something'} + original = foo.copy() + + @patch.dict(foo) + def test(): + foo['a'] = 3 + del foo['initial'] + foo['other'] = 'something else' + + test() + + self.assertEqual(foo, original) + + @patch.dict(foo, {'a': 'b'}) + def test(): + self.assertEqual(len(foo), 3) + self.assertEqual(foo['a'], 'b') + + test() + + self.assertEqual(foo, original) + + @patch.dict(foo, [('a', 'b')]) + def test(): + self.assertEqual(len(foo), 3) + self.assertEqual(foo['a'], 'b') + + test() + + self.assertEqual(foo, original) + + + def test_patch_dict_with_container_object(self): + foo = Container() + foo['initial'] = object() + foo['other'] = 'something' + + original = foo.values.copy() + + @patch.dict(foo) + def test(): + foo['a'] = 3 + del foo['initial'] + foo['other'] = 'something else' + + test() + + self.assertEqual(foo.values, original) + + @patch.dict(foo, {'a': 'b'}) + def test(): + self.assertEqual(len(foo.values), 3) + self.assertEqual(foo['a'], 'b') + + test() + + self.assertEqual(foo.values, original) + + + def test_patch_dict_with_clear(self): + foo = {'initial': object(), 'other': 'something'} + original = foo.copy() + + @patch.dict(foo, clear=True) + def test(): + self.assertEqual(foo, {}) + foo['a'] = 3 + foo['other'] = 'something else' + + test() + + self.assertEqual(foo, original) + + @patch.dict(foo, {'a': 'b'}, clear=True) + def test(): + self.assertEqual(foo, {'a': 'b'}) + + test() + + self.assertEqual(foo, original) + + @patch.dict(foo, [('a', 'b')], clear=True) + def test(): + self.assertEqual(foo, {'a': 'b'}) + + test() + + self.assertEqual(foo, original) + + + def test_patch_dict_with_container_object_and_clear(self): + foo = Container() + foo['initial'] = object() + foo['other'] = 'something' + + original = foo.values.copy() + + @patch.dict(foo, clear=True) + def test(): + self.assertEqual(foo.values, {}) + foo['a'] = 3 + foo['other'] = 'something else' + + test() + + self.assertEqual(foo.values, original) + + @patch.dict(foo, {'a': 'b'}, clear=True) + def test(): + self.assertEqual(foo.values, {'a': 'b'}) + + test() + + self.assertEqual(foo.values, original) + + + def 
test_name_preserved(self): + foo = {} + + @patch('%s.SomeClass' % __name__, object()) + @patch('%s.SomeClass' % __name__, object(), autospec=True) + @patch.object(SomeClass, object()) + @patch.dict(foo) + def some_name(): + pass + + self.assertEqual(some_name.__name__, 'some_name') + + + def test_patch_with_exception(self): + foo = {} + + @patch.dict(foo, {'a': 'b'}) + def test(): + raise NameError('Konrad') + try: + test() + except NameError: + pass + else: + self.fail('NameError not raised by test') + + self.assertEqual(foo, {}) + + + def test_patch_dict_with_string(self): + @patch.dict('os.environ', {'konrad_delong': 'some value'}) + def test(): + self.assertIn('konrad_delong', os.environ) + + test() + + + @unittest.expectedFailure + def test_patch_descriptor(self): + # would be some effort to fix this - we could special case the + # builtin descriptors: classmethod, property, staticmethod + class Nothing(object): + foo = None + + class Something(object): + foo = {} + + @patch.object(Nothing, 'foo', 2) + @classmethod + def klass(cls): + self.assertIs(cls, Something) + + @patch.object(Nothing, 'foo', 2) + @staticmethod + def static(arg): + return arg + + @patch.dict(foo) + @classmethod + def klass_dict(cls): + self.assertIs(cls, Something) + + @patch.dict(foo) + @staticmethod + def static_dict(arg): + return arg + + # these will raise exceptions if patching descriptors is broken + self.assertEqual(Something.static('f00'), 'f00') + Something.klass() + self.assertEqual(Something.static_dict('f00'), 'f00') + Something.klass_dict() + + something = Something() + self.assertEqual(something.static('f00'), 'f00') + something.klass() + self.assertEqual(something.static_dict('f00'), 'f00') + something.klass_dict() + + + def test_patch_spec_set(self): + @patch('%s.SomeClass' % __name__, spec_set=SomeClass) + def test(MockClass): + MockClass.z = 'foo' + + self.assertRaises(AttributeError, test) + + @patch.object(support, 'SomeClass', spec_set=SomeClass) + def test(MockClass): + MockClass.z = 'foo' + + self.assertRaises(AttributeError, test) + @patch('%s.SomeClass' % __name__, spec_set=True) + def test(MockClass): + MockClass.z = 'foo' + + self.assertRaises(AttributeError, test) + + @patch.object(support, 'SomeClass', spec_set=True) + def test(MockClass): + MockClass.z = 'foo' + + self.assertRaises(AttributeError, test) + + + def test_spec_set_inherit(self): + @patch('%s.SomeClass' % __name__, spec_set=True) + def test(MockClass): + instance = MockClass() + instance.z = 'foo' + + self.assertRaises(AttributeError, test) + + + def test_patch_start_stop(self): + original = something + patcher = patch('%s.something' % __name__) + self.assertIs(something, original) + mock = patcher.start() + try: + self.assertIsNot(mock, original) + self.assertIs(something, mock) + finally: + patcher.stop() + self.assertIs(something, original) + + + def test_stop_without_start(self): + patcher = patch(foo_name, 'bar', 3) + + # calling stop without start used to produce a very obscure error + self.assertRaises(RuntimeError, patcher.stop) + + + def test_patchobject_start_stop(self): + original = something + patcher = patch.object(PTModule, 'something', 'foo') + self.assertIs(something, original) + replaced = patcher.start() + try: + self.assertEqual(replaced, 'foo') + self.assertIs(something, replaced) + finally: + patcher.stop() + self.assertIs(something, original) + + + def test_patch_dict_start_stop(self): + d = {'foo': 'bar'} + original = d.copy() + patcher = patch.dict(d, [('spam', 'eggs')], clear=True) + 
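# constructing the patcher must leave the dict untouched until start() +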
self.assertEqual(d, original) + + patcher.start() + try: + self.assertEqual(d, {'spam': 'eggs'}) + finally: + patcher.stop() + self.assertEqual(d, original) + + + def test_patch_dict_class_decorator(self): + this = self + d = {'spam': 'eggs'} + original = d.copy() + + class Test(object): + def test_first(self): + this.assertEqual(d, {'foo': 'bar'}) + def test_second(self): + this.assertEqual(d, {'foo': 'bar'}) + + Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test) + self.assertEqual(d, original) + + test = Test() + + test.test_first() + self.assertEqual(d, original) + + test.test_second() + self.assertEqual(d, original) + + test = Test() + + test.test_first() + self.assertEqual(d, original) + + test.test_second() + self.assertEqual(d, original) + + + def test_get_only_proxy(self): + class Something(object): + foo = 'foo' + class SomethingElse: + foo = 'foo' + + for thing in Something, SomethingElse, Something(), SomethingElse: + proxy = _get_proxy(thing) + + @patch.object(proxy, 'foo', 'bar') + def test(): + self.assertEqual(proxy.foo, 'bar') + test() + self.assertEqual(proxy.foo, 'foo') + self.assertEqual(thing.foo, 'foo') + self.assertNotIn('foo', proxy.__dict__) + + + def test_get_set_delete_proxy(self): + class Something(object): + foo = 'foo' + class SomethingElse: + foo = 'foo' + + for thing in Something, SomethingElse, Something(), SomethingElse: + proxy = _get_proxy(Something, get_only=False) + + @patch.object(proxy, 'foo', 'bar') + def test(): + self.assertEqual(proxy.foo, 'bar') + test() + self.assertEqual(proxy.foo, 'foo') + self.assertEqual(thing.foo, 'foo') + self.assertNotIn('foo', proxy.__dict__) + + + def test_patch_keyword_args(self): + kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33, + 'foo': MagicMock()} + + patcher = patch(foo_name, **kwargs) + mock = patcher.start() + patcher.stop() + + self.assertRaises(KeyError, mock) + self.assertEqual(mock.foo.bar(), 33) + self.assertIsInstance(mock.foo, MagicMock) + + + def test_patch_object_keyword_args(self): + kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33, + 'foo': MagicMock()} + + patcher = patch.object(Foo, 'f', **kwargs) + mock = patcher.start() + patcher.stop() + + self.assertRaises(KeyError, mock) + self.assertEqual(mock.foo.bar(), 33) + self.assertIsInstance(mock.foo, MagicMock) + + + def test_patch_dict_keyword_args(self): + original = {'foo': 'bar'} + copy = original.copy() + + patcher = patch.dict(original, foo=3, bar=4, baz=5) + patcher.start() + + try: + self.assertEqual(original, dict(foo=3, bar=4, baz=5)) + finally: + patcher.stop() + + self.assertEqual(original, copy) + + + def test_autospec(self): + class Boo(object): + def __init__(self, a): + pass + def f(self, a): + pass + def g(self): + pass + foo = 'bar' + + class Bar(object): + def a(self): + pass + + def _test(mock): + mock(1) + mock.assert_called_with(1) + self.assertRaises(TypeError, mock) + + def _test2(mock): + mock.f(1) + mock.f.assert_called_with(1) + self.assertRaises(TypeError, mock.f) + + mock.g() + mock.g.assert_called_with() + self.assertRaises(TypeError, mock.g, 1) + + self.assertRaises(AttributeError, getattr, mock, 'h') + + mock.foo.lower() + mock.foo.lower.assert_called_with() + self.assertRaises(AttributeError, getattr, mock.foo, 'bar') + + mock.Bar() + mock.Bar.assert_called_with() + + mock.Bar.a() + mock.Bar.a.assert_called_with() + self.assertRaises(TypeError, mock.Bar.a, 1) + + mock.Bar().a() + mock.Bar().a.assert_called_with() + self.assertRaises(TypeError, mock.Bar().a, 1) + + 
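# autospec rejects attributes that don't exist on the spec class or + # its instances +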
self.assertRaises(AttributeError, getattr, mock.Bar, 'b') + self.assertRaises(AttributeError, getattr, mock.Bar(), 'b') + + def function(mock): + _test(mock) + _test2(mock) + _test2(mock(1)) + self.assertIs(mock, Foo) + return mock + + test = patch(foo_name, autospec=True)(function) + + mock = test() + self.assertIsNot(Foo, mock) + # test patching a second time works + test() + + module = sys.modules[__name__] + test = patch.object(module, 'Foo', autospec=True)(function) + + mock = test() + self.assertIsNot(Foo, mock) + # test patching a second time works + test() + + + def test_autospec_function(self): + @patch('%s.function' % __name__, autospec=True) + def test(mock): + function(1) + function.assert_called_with(1) + function(2, 3) + function.assert_called_with(2, 3) + + self.assertRaises(TypeError, function) + self.assertRaises(AttributeError, getattr, function, 'foo') + + test() + + + def test_autospec_keywords(self): + @patch('%s.function' % __name__, autospec=True, + return_value=3) + def test(mock_function): + #self.assertEqual(function.abc, 'foo') + return function(1, 2) + + result = test() + self.assertEqual(result, 3) + + + def test_autospec_with_new(self): + patcher = patch('%s.function' % __name__, new=3, autospec=True) + self.assertRaises(TypeError, patcher.start) + + module = sys.modules[__name__] + patcher = patch.object(module, 'function', new=3, autospec=True) + self.assertRaises(TypeError, patcher.start) + + + def test_autospec_with_object(self): + class Bar(Foo): + extra = [] + + patcher = patch(foo_name, autospec=Bar) + mock = patcher.start() + try: + self.assertIsInstance(mock, Bar) + self.assertIsInstance(mock.extra, list) + finally: + patcher.stop() + + + def test_autospec_inherits(self): + FooClass = Foo + patcher = patch(foo_name, autospec=True) + mock = patcher.start() + try: + self.assertIsInstance(mock, FooClass) + self.assertIsInstance(mock(3), FooClass) + finally: + patcher.stop() + + + def test_autospec_name(self): + patcher = patch(foo_name, autospec=True) + mock = patcher.start() + + try: + self.assertIn(" name='Foo'", repr(mock)) + self.assertIn(" name='Foo.f'", repr(mock.f)) + self.assertIn(" name='Foo()'", repr(mock(None))) + self.assertIn(" name='Foo().f'", repr(mock(None).f)) + finally: + patcher.stop() + + + def test_tracebacks(self): + @patch.object(Foo, 'f', object()) + def test(): + raise AssertionError + try: + test() + except: + err = sys.exc_info() + + result = unittest.TextTestResult(None, None, 0) + traceback = result._exc_info_to_string(err, self) + self.assertIn('raise AssertionError', traceback) + + + def test_new_callable_patch(self): + patcher = patch(foo_name, new_callable=NonCallableMagicMock) + + m1 = patcher.start() + patcher.stop() + m2 = patcher.start() + patcher.stop() + + self.assertIsNot(m1, m2) + for mock in m1, m2: + self.assertNotCallable(m1) + + + def test_new_callable_patch_object(self): + patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock) + + m1 = patcher.start() + patcher.stop() + m2 = patcher.start() + patcher.stop() + + self.assertIsNot(m1, m2) + for mock in m1, m2: + self.assertNotCallable(m1) + + + def test_new_callable_keyword_arguments(self): + class Bar(object): + kwargs = None + def __init__(self, **kwargs): + Bar.kwargs = kwargs + + patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2) + m = patcher.start() + try: + self.assertIs(type(m), Bar) + self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2)) + finally: + patcher.stop() + + + def test_new_callable_spec(self): + class Bar(object): + kwargs 
= None + def __init__(self, **kwargs): + Bar.kwargs = kwargs + + patcher = patch(foo_name, new_callable=Bar, spec=Bar) + patcher.start() + try: + self.assertEqual(Bar.kwargs, dict(spec=Bar)) + finally: + patcher.stop() + + patcher = patch(foo_name, new_callable=Bar, spec_set=Bar) + patcher.start() + try: + self.assertEqual(Bar.kwargs, dict(spec_set=Bar)) + finally: + patcher.stop() + + + def test_new_callable_create(self): + non_existent_attr = '%s.weeeee' % foo_name + p = patch(non_existent_attr, new_callable=NonCallableMock) + self.assertRaises(AttributeError, p.start) + + p = patch(non_existent_attr, new_callable=NonCallableMock, + create=True) + m = p.start() + try: + self.assertNotCallable(m, magic=False) + finally: + p.stop() + + + def test_new_callable_incompatible_with_new(self): + self.assertRaises( + ValueError, patch, foo_name, new=object(), new_callable=MagicMock + ) + self.assertRaises( + ValueError, patch.object, Foo, 'f', new=object(), + new_callable=MagicMock + ) + + + def test_new_callable_incompatible_with_autospec(self): + self.assertRaises( + ValueError, patch, foo_name, new_callable=MagicMock, + autospec=True + ) + self.assertRaises( + ValueError, patch.object, Foo, 'f', new_callable=MagicMock, + autospec=True + ) + + + def test_new_callable_inherit_for_mocks(self): + class MockSub(Mock): + pass + + MockClasses = ( + NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub + ) + for Klass in MockClasses: + for arg in 'spec', 'spec_set': + kwargs = {arg: True} + p = patch(foo_name, new_callable=Klass, **kwargs) + m = p.start() + try: + instance = m.return_value + self.assertRaises(AttributeError, getattr, instance, 'x') + finally: + p.stop() + + + def test_new_callable_inherit_non_mock(self): + class NotAMock(object): + def __init__(self, spec): + self.spec = spec + + p = patch(foo_name, new_callable=NotAMock, spec=True) + m = p.start() + try: + self.assertTrue(is_instance(m, NotAMock)) + self.assertRaises(AttributeError, getattr, m, 'return_value') + finally: + p.stop() + + self.assertEqual(m.spec, Foo) + + + def test_new_callable_class_decorating(self): + test = self + original = Foo + class SomeTest(object): + + def _test(self, mock_foo): + test.assertIsNot(Foo, original) + test.assertIs(Foo, mock_foo) + test.assertIsInstance(Foo, SomeClass) + + def test_two(self, mock_foo): + self._test(mock_foo) + def test_one(self, mock_foo): + self._test(mock_foo) + + SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest) + SomeTest().test_one() + SomeTest().test_two() + self.assertIs(Foo, original) + + + def test_patch_multiple(self): + original_foo = Foo + original_f = Foo.f + original_g = Foo.g + + patcher1 = patch.multiple(foo_name, f=1, g=2) + patcher2 = patch.multiple(Foo, f=1, g=2) + + for patcher in patcher1, patcher2: + patcher.start() + try: + self.assertIs(Foo, original_foo) + self.assertEqual(Foo.f, 1) + self.assertEqual(Foo.g, 2) + finally: + patcher.stop() + + self.assertIs(Foo, original_foo) + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + @patch.multiple(foo_name, f=3, g=4) + def test(): + self.assertIs(Foo, original_foo) + self.assertEqual(Foo.f, 3) + self.assertEqual(Foo.g, 4) + + test() + + + def test_patch_multiple_no_kwargs(self): + self.assertRaises(ValueError, patch.multiple, foo_name) + self.assertRaises(ValueError, patch.multiple, Foo) + + + def test_patch_multiple_create_mocks(self): + original_foo = Foo + original_f = Foo.f + original_g = Foo.g + + @patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT) + def 
test(f, foo): + self.assertIs(Foo, original_foo) + self.assertIs(Foo.f, f) + self.assertEqual(Foo.g, 3) + self.assertIs(Foo.foo, foo) + self.assertTrue(is_instance(f, MagicMock)) + self.assertTrue(is_instance(foo, MagicMock)) + + test() + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_create_mocks_different_order(self): + # bug revealed by Jython! + original_f = Foo.f + original_g = Foo.g + + patcher = patch.object(Foo, 'f', 3) + patcher.attribute_name = 'f' + + other = patch.object(Foo, 'g', DEFAULT) + other.attribute_name = 'g' + patcher.additional_patchers = [other] + + @patcher + def test(g): + self.assertIs(Foo.g, g) + self.assertEqual(Foo.f, 3) + + test() + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_stacked_decorators(self): + original_foo = Foo + original_f = Foo.f + original_g = Foo.g + + @patch.multiple(foo_name, f=DEFAULT) + @patch.multiple(foo_name, foo=DEFAULT) + @patch(foo_name + '.g') + def test1(g, **kwargs): + _test(g, **kwargs) + + @patch.multiple(foo_name, f=DEFAULT) + @patch(foo_name + '.g') + @patch.multiple(foo_name, foo=DEFAULT) + def test2(g, **kwargs): + _test(g, **kwargs) + + @patch(foo_name + '.g') + @patch.multiple(foo_name, f=DEFAULT) + @patch.multiple(foo_name, foo=DEFAULT) + def test3(g, **kwargs): + _test(g, **kwargs) + + def _test(g, **kwargs): + f = kwargs.pop('f') + foo = kwargs.pop('foo') + self.assertFalse(kwargs) + + self.assertIs(Foo, original_foo) + self.assertIs(Foo.f, f) + self.assertIs(Foo.g, g) + self.assertIs(Foo.foo, foo) + self.assertTrue(is_instance(f, MagicMock)) + self.assertTrue(is_instance(g, MagicMock)) + self.assertTrue(is_instance(foo, MagicMock)) + + test1() + test2() + test3() + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_create_mocks_patcher(self): + original_foo = Foo + original_f = Foo.f + original_g = Foo.g + + patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT) + + result = patcher.start() + try: + f = result['f'] + foo = result['foo'] + self.assertEqual(set(result), set(['f', 'foo'])) + + self.assertIs(Foo, original_foo) + self.assertIs(Foo.f, f) + self.assertIs(Foo.foo, foo) + self.assertTrue(is_instance(f, MagicMock)) + self.assertTrue(is_instance(foo, MagicMock)) + finally: + patcher.stop() + + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_decorating_class(self): + test = self + original_foo = Foo + original_f = Foo.f + original_g = Foo.g + + class SomeTest(object): + + def _test(self, f, foo): + test.assertIs(Foo, original_foo) + test.assertIs(Foo.f, f) + test.assertEqual(Foo.g, 3) + test.assertIs(Foo.foo, foo) + test.assertTrue(is_instance(f, MagicMock)) + test.assertTrue(is_instance(foo, MagicMock)) + + def test_two(self, f, foo): + self._test(f, foo) + def test_one(self, f, foo): + self._test(f, foo) + + SomeTest = patch.multiple( + foo_name, f=DEFAULT, g=3, foo=DEFAULT + )(SomeTest) + + thing = SomeTest() + thing.test_one() + thing.test_two() + + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_create(self): + patcher = patch.multiple(Foo, blam='blam') + self.assertRaises(AttributeError, patcher.start) + + patcher = patch.multiple(Foo, blam='blam', create=True) + patcher.start() + try: + self.assertEqual(Foo.blam, 'blam') + finally: + patcher.stop() + + self.assertFalse(hasattr(Foo, 'blam')) + + + def 
test_patch_multiple_spec_set(self): + # if spec_set works then we can assume that spec and autospec also + # work as the underlying machinery is the same + patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b']) + result = patcher.start() + try: + self.assertEqual(Foo.foo, result['foo']) + Foo.foo.a(1) + Foo.foo.b(2) + Foo.foo.a.assert_called_with(1) + Foo.foo.b.assert_called_with(2) + self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None) + finally: + patcher.stop() + + + def test_patch_multiple_new_callable(self): + class Thing(object): + pass + + patcher = patch.multiple( + Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing + ) + result = patcher.start() + try: + self.assertIs(Foo.f, result['f']) + self.assertIs(Foo.g, result['g']) + self.assertIsInstance(Foo.f, Thing) + self.assertIsInstance(Foo.g, Thing) + self.assertIsNot(Foo.f, Foo.g) + finally: + patcher.stop() + + + def test_nested_patch_failure(self): + original_f = Foo.f + original_g = Foo.g + + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'missing', 1) + @patch.object(Foo, 'f', 1) + def thing1(): + pass + + @patch.object(Foo, 'missing', 1) + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'f', 1) + def thing2(): + pass + + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'f', 1) + @patch.object(Foo, 'missing', 1) + def thing3(): + pass + + for func in thing1, thing2, thing3: + self.assertRaises(AttributeError, func) + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_new_callable_failure(self): + original_f = Foo.f + original_g = Foo.g + original_foo = Foo.foo + + def crasher(): + raise NameError('crasher') + + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'foo', new_callable=crasher) + @patch.object(Foo, 'f', 1) + def thing1(): + pass + + @patch.object(Foo, 'foo', new_callable=crasher) + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'f', 1) + def thing2(): + pass + + @patch.object(Foo, 'g', 1) + @patch.object(Foo, 'f', 1) + @patch.object(Foo, 'foo', new_callable=crasher) + def thing3(): + pass + + for func in thing1, thing2, thing3: + self.assertRaises(NameError, func) + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + self.assertEqual(Foo.foo, original_foo) + + + def test_patch_multiple_failure(self): + original_f = Foo.f + original_g = Foo.g + + patcher = patch.object(Foo, 'f', 1) + patcher.attribute_name = 'f' + + good = patch.object(Foo, 'g', 1) + good.attribute_name = 'g' + + bad = patch.object(Foo, 'missing', 1) + bad.attribute_name = 'missing' + + for additionals in [good, bad], [bad, good]: + patcher.additional_patchers = additionals + + @patcher + def func(): + pass + + self.assertRaises(AttributeError, func) + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + + + def test_patch_multiple_new_callable_failure(self): + original_f = Foo.f + original_g = Foo.g + original_foo = Foo.foo + + def crasher(): + raise NameError('crasher') + + patcher = patch.object(Foo, 'f', 1) + patcher.attribute_name = 'f' + + good = patch.object(Foo, 'g', 1) + good.attribute_name = 'g' + + bad = patch.object(Foo, 'foo', new_callable=crasher) + bad.attribute_name = 'foo' + + for additionals in [good, bad], [bad, good]: + patcher.additional_patchers = additionals + + @patcher + def func(): + pass + + self.assertRaises(NameError, func) + self.assertEqual(Foo.f, original_f) + self.assertEqual(Foo.g, original_g) + self.assertEqual(Foo.foo, original_foo) + + + def test_patch_multiple_string_subclasses(self): + for base in (str, unicode): + 
            Foo = type('Foo', (base,), {'fish': 'tasty'})
+            foo = Foo()
+            @patch.multiple(foo, fish='nearly gone')
+            def test():
+                self.assertEqual(foo.fish, 'nearly gone')
+
+            test()
+            self.assertEqual(foo.fish, 'tasty')
+
+
+    @patch('mock.patch.TEST_PREFIX', 'foo')
+    def test_patch_test_prefix(self):
+        class Foo(object):
+            thing = 'original'
+
+            def foo_one(self):
+                return self.thing
+            def foo_two(self):
+                return self.thing
+            def test_one(self):
+                return self.thing
+            def test_two(self):
+                return self.thing
+
+        Foo = patch.object(Foo, 'thing', 'changed')(Foo)
+
+        foo = Foo()
+        self.assertEqual(foo.foo_one(), 'changed')
+        self.assertEqual(foo.foo_two(), 'changed')
+        self.assertEqual(foo.test_one(), 'original')
+        self.assertEqual(foo.test_two(), 'original')
+
+
+    @patch('mock.patch.TEST_PREFIX', 'bar')
+    def test_patch_dict_test_prefix(self):
+        class Foo(object):
+            def bar_one(self):
+                return dict(the_dict)
+            def bar_two(self):
+                return dict(the_dict)
+            def test_one(self):
+                return dict(the_dict)
+            def test_two(self):
+                return dict(the_dict)
+
+        the_dict = {'key': 'original'}
+        Foo = patch.dict(the_dict, key='changed')(Foo)
+
+        foo = Foo()
+        self.assertEqual(foo.bar_one(), {'key': 'changed'})
+        self.assertEqual(foo.bar_two(), {'key': 'changed'})
+        self.assertEqual(foo.test_one(), {'key': 'original'})
+        self.assertEqual(foo.test_two(), {'key': 'original'})
+
+
+    def test_patch_with_spec_mock_repr(self):
+        for arg in ('spec', 'autospec', 'spec_set'):
+            p = patch('%s.SomeClass' % __name__, **{arg: True})
+            m = p.start()
+            try:
+                self.assertIn(" name='SomeClass'", repr(m))
+                self.assertIn(" name='SomeClass.class_attribute'",
+                              repr(m.class_attribute))
+                self.assertIn(" name='SomeClass()'", repr(m()))
+                self.assertIn(" name='SomeClass().class_attribute'",
+                              repr(m().class_attribute))
+            finally:
+                p.stop()
+
+
+    def test_patch_nested_autospec_repr(self):
+        p = patch('mock.tests.support', autospec=True)
+        m = p.start()
+        try:
+            self.assertIn(" name='support.SomeClass.wibble()'",
+                          repr(m.SomeClass.wibble()))
+            self.assertIn(" name='support.SomeClass().wibble()'",
+                          repr(m.SomeClass().wibble()))
+        finally:
+            p.stop()
+
+
+    def test_mock_calls_with_patch(self):
+        for arg in ('spec', 'autospec', 'spec_set'):
+            p = patch('%s.SomeClass' % __name__, **{arg: True})
+            m = p.start()
+            try:
+                m.wibble()
+
+                kalls = [call.wibble()]
+                self.assertEqual(m.mock_calls, kalls)
+                self.assertEqual(m.method_calls, kalls)
+                self.assertEqual(m.wibble.mock_calls, [call()])
+
+                result = m()
+                kalls.append(call())
+                self.assertEqual(m.mock_calls, kalls)
+
+                result.wibble()
+                kalls.append(call().wibble())
+                self.assertEqual(m.mock_calls, kalls)
+
+                self.assertEqual(result.mock_calls, [call.wibble()])
+                self.assertEqual(result.wibble.mock_calls, [call()])
+                self.assertEqual(result.method_calls, [call.wibble()])
+            finally:
+                p.stop()
+
+
+    def test_patch_imports_lazily(self):
+        sys.modules.pop('squizz', None)
+
+        p1 = patch('squizz.squozz')
+        self.assertRaises(ImportError, p1.start)
+
+        squizz = Mock()
+        squizz.squozz = 6
+        sys.modules['squizz'] = squizz
+        p1 = patch('squizz.squozz')
+        squizz.squozz = 3
+        p1.start()
+        p1.stop()
+        self.assertEqual(squizz.squozz, 3)
+
+
+    def test_patch_propagates_exc_on_exit(self):
+        class holder:
+            exc_info = None, None, None
+
+        class custom_patch(_patch):
+            def __exit__(self, etype=None, val=None, tb=None):
+                _patch.__exit__(self, etype, val, tb)
+                holder.exc_info = etype, val, tb
+            stop = __exit__
+
+        def with_custom_patch(target):
+            getter, attribute = _get_target(target)
+            return custom_patch(
+                getter, attribute, DEFAULT, None, False, None,
+                None, None, {}
+            )
+
+        @with_custom_patch('squizz.squozz')
+        def test(mock):
+            raise RuntimeError
+
+        self.assertRaises(RuntimeError, test)
+        self.assertIs(holder.exc_info[0], RuntimeError)
+        self.assertIsNotNone(holder.exc_info[1],
+                             'exception value not propagated')
+        self.assertIsNotNone(holder.exc_info[2],
+                             'exception traceback not propagated')
+
+
+    def test_create_and_specs(self):
+        for kwarg in ('spec', 'spec_set', 'autospec'):
+            p = patch('%s.doesnotexist' % __name__, create=True,
+                      **{kwarg: True})
+            self.assertRaises(TypeError, p.start)
+            self.assertRaises(NameError, lambda: doesnotexist)
+
+            # check that spec with create is innocuous if the original exists
+            p = patch(MODNAME, create=True, **{kwarg: True})
+            p.start()
+            p.stop()
+
+
+    def test_multiple_specs(self):
+        original = PTModule
+        for kwarg in ('spec', 'spec_set'):
+            p = patch(MODNAME, autospec=0, **{kwarg: 0})
+            self.assertRaises(TypeError, p.start)
+            self.assertIs(PTModule, original)
+
+        for kwarg in ('spec', 'autospec'):
+            p = patch(MODNAME, spec_set=0, **{kwarg: 0})
+            self.assertRaises(TypeError, p.start)
+            self.assertIs(PTModule, original)
+
+        for kwarg in ('spec_set', 'autospec'):
+            p = patch(MODNAME, spec=0, **{kwarg: 0})
+            self.assertRaises(TypeError, p.start)
+            self.assertIs(PTModule, original)
+
+
+    def test_specs_false_instead_of_none(self):
+        p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
+        mock = p.start()
+        try:
+            # no spec should have been set, so attribute access should not fail
+            mock.does_not_exist
+            mock.does_not_exist = 3
+        finally:
+            p.stop()
+
+
+    def test_falsey_spec(self):
+        for kwarg in ('spec', 'autospec', 'spec_set'):
+            p = patch(MODNAME, **{kwarg: 0})
+            m = p.start()
+            try:
+                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
+            finally:
+                p.stop()
+
+
+    def test_spec_set_true(self):
+        for kwarg in ('spec', 'autospec'):
+            p = patch(MODNAME, spec_set=True, **{kwarg: True})
+            m = p.start()
+            try:
+                self.assertRaises(AttributeError, setattr, m,
+                                  'doesnotexist', 'something')
+                self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
+            finally:
+                p.stop()
+
+
+    def test_callable_spec_as_list(self):
+        spec = ('__call__',)
+        p = patch(MODNAME, spec=spec)
+        m = p.start()
+        try:
+            self.assertTrue(callable(m))
+        finally:
+            p.stop()
+
+
+    def test_not_callable_spec_as_list(self):
+        spec = ('foo', 'bar')
+        p = patch(MODNAME, spec=spec)
+        m = p.start()
+        try:
+            self.assertFalse(callable(m))
+        finally:
+            p.stop()
+
+
+    def test_patch_stopall(self):
+        unlink = os.unlink
+        chdir = os.chdir
+        path = os.path
+        patch('os.unlink', something).start()
+        patch('os.chdir', something_else).start()
+
+        @patch('os.path')
+        def patched(mock_path):
+            patch.stopall()
+            self.assertIs(os.path, mock_path)
+        self.assertIs(os.unlink, unlink)
+        self.assertIs(os.chdir, chdir)
+
+        patched()
+        self.assertIs(os.path, path)
+
+
+    def test_wrapped_patch(self):
+        decorated = patch('sys.modules')(function)
+        self.assertIs(decorated.__wrapped__, function)
+
+
+    def test_wrapped_several_times_patch(self):
+        decorated = patch('sys.modules')(function)
+        decorated = patch('sys.modules')(decorated)
+        self.assertIs(decorated.__wrapped__, function)
+
+
+    def test_wrapped_patch_object(self):
+        decorated = patch.object(sys, 'modules')(function)
+        self.assertIs(decorated.__wrapped__, function)
+
+
+    def test_wrapped_patch_dict(self):
+        decorated = patch.dict('sys.modules')(function)
+        self.assertIs(decorated.__wrapped__, function)
+
+
+    def 
test_wrapped_patch_multiple(self): + decorated = patch.multiple('sys', modules={})(function) + self.assertIs(decorated.__wrapped__, function) + + def test_stopall_lifo(self): + stopped = [] + class thing(object): + one = two = three = None + + def get_patch(attribute): + class mypatch(_patch): + def stop(self): + stopped.append(attribute) + return super(mypatch, self).stop() + return mypatch(lambda: thing, attribute, None, None, + False, None, None, None, {}) + [get_patch(val).start() for val in ("one", "two", "three")] + patch.stopall() + + self.assertEqual(stopped, ["three", "two", "one"]) + + + def test_special_attrs(self): + def foo(x=0): + """TEST""" + return x + with patch.object(foo, '__defaults__', (1, )): + self.assertEqual(foo(), 1) + self.assertEqual(foo(), 0) + + with patch.object(foo, '__doc__', "FUN"): + self.assertEqual(foo.__doc__, "FUN") + self.assertEqual(foo.__doc__, "TEST") + + with patch.object(foo, '__module__', "testpatch2"): + self.assertEqual(foo.__module__, "testpatch2") + self.assertEqual(foo.__module__, __name__) + + if hasattr(self.test_special_attrs, '__annotations__'): + with patch.object(foo, '__annotations__', dict([('s', 1, )])): + self.assertEqual(foo.__annotations__, dict([('s', 1, )])) + self.assertEqual(foo.__annotations__, dict()) + + if hasattr(self.test_special_attrs, '__kwdefaults__'): + foo = eval("lambda *a, x=0: x") + with patch.object(foo, '__kwdefaults__', dict([('x', 1, )])): + self.assertEqual(foo(), 1) + self.assertEqual(foo(), 0) + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/testsentinel.py b/ext/mock/tests/testsentinel.py new file mode 100644 index 0000000000..69b2042725 --- /dev/null +++ b/ext/mock/tests/testsentinel.py @@ -0,0 +1,33 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +import unittest2 as unittest + +from mock import sentinel, DEFAULT + + +class SentinelTest(unittest.TestCase): + + def testSentinels(self): + self.assertEqual(sentinel.whatever, sentinel.whatever, + 'sentinel not stored') + self.assertNotEqual(sentinel.whatever, sentinel.whateverelse, + 'sentinel should be unique') + + + def testSentinelName(self): + self.assertEqual(str(sentinel.whatever), 'sentinel.whatever', + 'sentinel name incorrect') + + + def testDEFAULT(self): + self.assertIs(DEFAULT, sentinel.DEFAULT) + + def testBases(self): + # If this doesn't raise an AttributeError then help(mock) is broken + self.assertRaises(AttributeError, lambda: sentinel.__bases__) + + +if __name__ == '__main__': + unittest.main() diff --git a/ext/mock/tests/testwith.py b/ext/mock/tests/testwith.py new file mode 100644 index 0000000000..aa7812b306 --- /dev/null +++ b/ext/mock/tests/testwith.py @@ -0,0 +1,306 @@ +# Copyright (C) 2007-2012 Michael Foord & the mock team +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# http://www.voidspace.org.uk/python/mock/ + +from warnings import catch_warnings + +import unittest2 as unittest + +from mock.tests.support import is_instance +from mock import MagicMock, Mock, patch, sentinel, mock_open, call + + +something = sentinel.Something +something_else = sentinel.SomethingElse + + + +class WithTest(unittest.TestCase): + + def test_with_statement(self): + with patch('%s.something' % __name__, sentinel.Something2): + self.assertEqual(something, sentinel.Something2, "unpatched") + self.assertEqual(something, sentinel.Something) + + + def test_with_statement_exception(self): + try: + with patch('%s.something' 
% __name__, sentinel.Something2): + self.assertEqual(something, sentinel.Something2, "unpatched") + raise Exception('pow') + except Exception: + pass + else: + self.fail("patch swallowed exception") + self.assertEqual(something, sentinel.Something) + + + def test_with_statement_as(self): + with patch('%s.something' % __name__) as mock_something: + self.assertEqual(something, mock_something, "unpatched") + self.assertTrue(is_instance(mock_something, MagicMock), + "patching wrong type") + self.assertEqual(something, sentinel.Something) + + + def test_patch_object_with_statement(self): + class Foo(object): + something = 'foo' + original = Foo.something + with patch.object(Foo, 'something'): + self.assertNotEqual(Foo.something, original, "unpatched") + self.assertEqual(Foo.something, original) + + + def test_with_statement_nested(self): + with catch_warnings(record=True): + with patch('%s.something' % __name__) as mock_something: + with patch('%s.something_else' % __name__) as mock_something_else: + self.assertEqual(something, mock_something, "unpatched") + self.assertEqual(something_else, mock_something_else, + "unpatched") + + self.assertEqual(something, sentinel.Something) + self.assertEqual(something_else, sentinel.SomethingElse) + + + def test_with_statement_specified(self): + with patch('%s.something' % __name__, sentinel.Patched) as mock_something: + self.assertEqual(something, mock_something, "unpatched") + self.assertEqual(mock_something, sentinel.Patched, "wrong patch") + self.assertEqual(something, sentinel.Something) + + + def testContextManagerMocking(self): + mock = Mock() + mock.__enter__ = Mock() + mock.__exit__ = Mock() + mock.__exit__.return_value = False + + with mock as m: + self.assertEqual(m, mock.__enter__.return_value) + mock.__enter__.assert_called_with() + mock.__exit__.assert_called_with(None, None, None) + + + def test_context_manager_with_magic_mock(self): + mock = MagicMock() + + with self.assertRaises(TypeError): + with mock: + 'foo' + 3 + mock.__enter__.assert_called_with() + self.assertTrue(mock.__exit__.called) + + + def test_with_statement_same_attribute(self): + with patch('%s.something' % __name__, sentinel.Patched) as mock_something: + self.assertEqual(something, mock_something, "unpatched") + + with patch('%s.something' % __name__) as mock_again: + self.assertEqual(something, mock_again, "unpatched") + + self.assertEqual(something, mock_something, + "restored with wrong instance") + + self.assertEqual(something, sentinel.Something, "not restored") + + + def test_with_statement_imbricated(self): + with patch('%s.something' % __name__) as mock_something: + self.assertEqual(something, mock_something, "unpatched") + + with patch('%s.something_else' % __name__) as mock_something_else: + self.assertEqual(something_else, mock_something_else, + "unpatched") + + self.assertEqual(something, sentinel.Something) + self.assertEqual(something_else, sentinel.SomethingElse) + + + def test_dict_context_manager(self): + foo = {} + with patch.dict(foo, {'a': 'b'}): + self.assertEqual(foo, {'a': 'b'}) + self.assertEqual(foo, {}) + + with self.assertRaises(NameError): + with patch.dict(foo, {'a': 'b'}): + self.assertEqual(foo, {'a': 'b'}) + raise NameError('Konrad') + + self.assertEqual(foo, {}) + + + +class TestMockOpen(unittest.TestCase): + + def test_mock_open(self): + mock = mock_open() + with patch('%s.open' % __name__, mock, create=True) as patched: + self.assertIs(patched, mock) + open('foo') + + mock.assert_called_once_with('foo') + + + def 
test_mock_open_context_manager(self): + mock = mock_open() + handle = mock.return_value + with patch('%s.open' % __name__, mock, create=True): + with open('foo') as f: + f.read() + + expected_calls = [call('foo'), call().__enter__(), call().read(), + call().__exit__(None, None, None)] + self.assertEqual(mock.mock_calls, expected_calls) + self.assertIs(f, handle) + + def test_mock_open_context_manager_multiple_times(self): + mock = mock_open() + with patch('%s.open' % __name__, mock, create=True): + with open('foo') as f: + f.read() + with open('bar') as f: + f.read() + + expected_calls = [ + call('foo'), call().__enter__(), call().read(), + call().__exit__(None, None, None), + call('bar'), call().__enter__(), call().read(), + call().__exit__(None, None, None)] + self.assertEqual(mock.mock_calls, expected_calls) + + def test_explicit_mock(self): + mock = MagicMock() + mock_open(mock) + + with patch('%s.open' % __name__, mock, create=True) as patched: + self.assertIs(patched, mock) + open('foo') + + mock.assert_called_once_with('foo') + + + def test_read_data(self): + mock = mock_open(read_data='foo') + with patch('%s.open' % __name__, mock, create=True): + h = open('bar') + result = h.read() + + self.assertEqual(result, 'foo') + + + def test_readline_data(self): + # Check that readline will return all the lines from the fake file + mock = mock_open(read_data='foo\nbar\nbaz\n') + with patch('%s.open' % __name__, mock, create=True): + h = open('bar') + line1 = h.readline() + line2 = h.readline() + line3 = h.readline() + self.assertEqual(line1, 'foo\n') + self.assertEqual(line2, 'bar\n') + self.assertEqual(line3, 'baz\n') + + # Check that we properly emulate a file that doesn't end in a newline + mock = mock_open(read_data='foo') + with patch('%s.open' % __name__, mock, create=True): + h = open('bar') + result = h.readline() + self.assertEqual(result, 'foo') + + + def test_readlines_data(self): + # Test that emulating a file that ends in a newline character works + mock = mock_open(read_data='foo\nbar\nbaz\n') + with patch('%s.open' % __name__, mock, create=True): + h = open('bar') + result = h.readlines() + self.assertEqual(result, ['foo\n', 'bar\n', 'baz\n']) + + # Test that files without a final newline will also be correctly + # emulated + mock = mock_open(read_data='foo\nbar\nbaz') + with patch('%s.open' % __name__, mock, create=True): + h = open('bar') + result = h.readlines() + + self.assertEqual(result, ['foo\n', 'bar\n', 'baz']) + + + def test_read_bytes(self): + mock = mock_open(read_data=b'\xc6') + with patch('%s.open' % __name__, mock, create=True): + with open('abc', 'rb') as f: + result = f.read() + self.assertEqual(result, b'\xc6') + + + def test_readline_bytes(self): + m = mock_open(read_data=b'abc\ndef\nghi\n') + with patch('%s.open' % __name__, m, create=True): + with open('abc', 'rb') as f: + line1 = f.readline() + line2 = f.readline() + line3 = f.readline() + self.assertEqual(line1, b'abc\n') + self.assertEqual(line2, b'def\n') + self.assertEqual(line3, b'ghi\n') + + + def test_readlines_bytes(self): + m = mock_open(read_data=b'abc\ndef\nghi\n') + with patch('%s.open' % __name__, m, create=True): + with open('abc', 'rb') as f: + result = f.readlines() + self.assertEqual(result, [b'abc\n', b'def\n', b'ghi\n']) + + + def test_mock_open_read_with_argument(self): + # At one point calling read with an argument was broken + # for mocks returned by mock_open + some_data = 'foo\nbar\nbaz' + mock = mock_open(read_data=some_data) + self.assertEqual(mock().read(10), some_data) + + + 
    def test_interleaved_reads(self):
+        # Test that calling read, readline, and readlines pulls data
+        # sequentially from the data we preload with
+        mock = mock_open(read_data='foo\nbar\nbaz\n')
+        with patch('%s.open' % __name__, mock, create=True):
+            h = open('bar')
+            line1 = h.readline()
+            rest = h.readlines()
+        self.assertEqual(line1, 'foo\n')
+        self.assertEqual(rest, ['bar\n', 'baz\n'])
+
+        mock = mock_open(read_data='foo\nbar\nbaz\n')
+        with patch('%s.open' % __name__, mock, create=True):
+            h = open('bar')
+            line1 = h.readline()
+            rest = h.read()
+        self.assertEqual(line1, 'foo\n')
+        self.assertEqual(rest, 'bar\nbaz\n')
+
+
+    def test_overriding_return_values(self):
+        mock = mock_open(read_data='foo')
+        handle = mock()
+
+        handle.read.return_value = 'bar'
+        handle.readline.return_value = 'bar'
+        handle.readlines.return_value = ['bar']
+
+        self.assertEqual(handle.read(), 'bar')
+        self.assertEqual(handle.readline(), 'bar')
+        self.assertEqual(handle.readlines(), ['bar'])
+
+        # call repeatedly to check that a StopIteration is not propagated
+        self.assertEqual(handle.readline(), 'bar')
+        self.assertEqual(handle.readline(), 'bar')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/ext/msgpack/_version.py b/ext/msgpack/_version.py
index 76bd8fbe0c..ecba3d8876 100644
--- a/ext/msgpack/_version.py
+++ b/ext/msgpack/_version.py
@@ -1 +1 @@
-version = (0, 4, 8)
+version = (0, 5, 1)
diff --git a/ext/msgpack/exceptions.py b/ext/msgpack/exceptions.py
index f7678f135b..97668814f2 100644
--- a/ext/msgpack/exceptions.py
+++ b/ext/msgpack/exceptions.py
@@ -1,5 +1,5 @@
 class UnpackException(Exception):
-    pass
+    """Deprecated. Use Exception instead to catch all exceptions during unpacking."""
 
 
 class BufferFull(UnpackException):
@@ -11,10 +11,10 @@ class OutOfData(UnpackException):
 
 
 class UnpackValueError(UnpackException, ValueError):
-    pass
+    """Deprecated. Use ValueError instead."""
 
 
-class ExtraData(ValueError):
+class ExtraData(UnpackValueError):
     def __init__(self, unpacked, extra):
         self.unpacked = unpacked
         self.extra = extra
@@ -22,8 +22,20 @@ def __init__(self, unpacked, extra):
     def __str__(self):
         return "unpack(b) received extra data."
 
+
 class PackException(Exception):
-    pass
+    """Deprecated. Use Exception instead to catch all exceptions during packing."""
+
 
 class PackValueError(PackException, ValueError):
-    pass
+    """PackValueError is raised when type of input data is supported but its value is unsupported.
+
+    Deprecated. Use ValueError instead.
+    """
+
+
+class PackOverflowError(PackValueError, OverflowError):
+    """PackOverflowError is raised when integer value is out of range of msgpack support [-2**63, 2**64).
+
+    Deprecated. Use ValueError instead.
+ """ diff --git a/ext/msgpack/fallback.py b/ext/msgpack/fallback.py index f682611de9..5447b530b8 100644 --- a/ext/msgpack/fallback.py +++ b/ext/msgpack/fallback.py @@ -1,8 +1,8 @@ """Fallback pure Python implementation of msgpack""" import sys -import array import struct +import warnings if sys.version_info[0] == 3: PY3 = True @@ -36,6 +36,10 @@ def __init__(self, s=b''): else: self.builder = StringBuilder() def write(self, s): + if isinstance(s, memoryview): + s = s.tobytes() + elif isinstance(s, bytearray): + s = bytes(s) self.builder.append(s) def getvalue(self): return self.builder.build() @@ -44,11 +48,13 @@ def getvalue(self): from io import BytesIO as StringIO newlist_hint = lambda size: [] + from msgpack.exceptions import ( BufferFull, OutOfData, UnpackValueError, PackValueError, + PackOverflowError, ExtraData) from msgpack import ExtType @@ -69,6 +75,31 @@ def getvalue(self): DEFAULT_RECURSE_LIMIT = 511 +def _check_type_strict(obj, t, type=type, tuple=tuple): + if type(t) is tuple: + return type(obj) in t + else: + return type(obj) is t + + +def _get_data_from_buffer(obj): + try: + view = memoryview(obj) + except TypeError: + # try to use legacy buffer protocol if 2.7, otherwise re-raise + if not PY3: + view = memoryview(buffer(obj)) + warnings.warn("using old buffer interface to unpack %s; " + "this leads to unpacking errors if slicing is used and " + "will be removed in a future version" % type(obj), + RuntimeWarning) + else: + raise + if view.itemsize != 1: + raise ValueError("cannot unpack from multi-byte object") + return view + + def unpack(stream, **kwargs): """ Unpack an object from `stream`. @@ -76,11 +107,8 @@ def unpack(stream, **kwargs): Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options. """ - unpacker = Unpacker(stream, **kwargs) - ret = unpacker._fb_unpack() - if unpacker._fb_got_extradata(): - raise ExtraData(ret, unpacker._fb_get_extradata()) - return ret + data = stream.read() + return unpackb(data, **kwargs) def unpackb(packed, **kwargs): @@ -93,11 +121,11 @@ def unpackb(packed, **kwargs): unpacker = Unpacker(None, **kwargs) unpacker.feed(packed) try: - ret = unpacker._fb_unpack() + ret = unpacker._unpack() except OutOfData: raise UnpackValueError("Data is not enough.") - if unpacker._fb_got_extradata(): - raise ExtraData(ret, unpacker._fb_get_extradata()) + if unpacker._got_extradata(): + raise ExtraData(ret, unpacker._get_extradata()) return ret @@ -111,7 +139,7 @@ class Unpacker(object): If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. :param int read_size: - Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`) + Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`) :param bool use_list: If true, unpack msgpack array to Python list. @@ -132,13 +160,13 @@ class Unpacker(object): If it is None (default), msgpack raw is deserialized to Python bytes. :param str unicode_errors: - Used for decoding msgpack raw with *encoding*. + (deprecated) Used for decoding msgpack raw with *encoding*. (default: `'strict'`) :param int max_buffer_size: Limits size of data waiting unpacked. 0 means system's INT_MAX (default). Raises `BufferFull` exception when it is insufficient. - You shoud set this parameter when unpacking data from untrusted source. + You should set this parameter when unpacking data from untrusted source. :param int max_str_len: Limits max length of str. 
(default: 2**31-1) @@ -181,21 +209,17 @@ def __init__(self, file_like=None, read_size=0, use_list=True, max_map_len=2147483647, max_ext_len=2147483647): if file_like is None: - self._fb_feeding = True + self._feeding = True else: if not callable(file_like.read): raise TypeError("`file_like.read` must be callable") self.file_like = file_like - self._fb_feeding = False + self._feeding = False - #: array of bytes feeded. - self._fb_buffers = [] - #: Which buffer we currently reads - self._fb_buf_i = 0 + #: array of bytes fed. + self._buffer = bytearray() #: Which position we currently reads - self._fb_buf_o = 0 - #: Total size of _fb_bufferes - self._fb_buf_n = 0 + self._buff_i = 0 # When Unpacker is used as an iterable, between the calls to next(), # the buffer is not "consumed" completely, for efficiency sake. @@ -203,13 +227,13 @@ def __init__(self, file_like=None, read_size=0, use_list=True, # the correct moments, we have to keep track of how sloppy we were. # Furthermore, when the buffer is incomplete (that is: in the case # we raise an OutOfData) we need to rollback the buffer to the correct - # state, which _fb_slopiness records. - self._fb_sloppiness = 0 + # state, which _buf_checkpoint records. + self._buf_checkpoint = 0 self._max_buffer_size = max_buffer_size or 2**31-1 if read_size > self._max_buffer_size: raise ValueError("read_size must be smaller than max_buffer_size") - self._read_size = read_size or min(self._max_buffer_size, 4096) + self._read_size = read_size or min(self._max_buffer_size, 16*1024) self._encoding = encoding self._unicode_errors = unicode_errors self._use_list = use_list @@ -222,6 +246,7 @@ def __init__(self, file_like=None, read_size=0, use_list=True, self._max_array_len = max_array_len self._max_map_len = max_map_len self._max_ext_len = max_ext_len + self._stream_offset = 0 if list_hook is not None and not callable(list_hook): raise TypeError('`list_hook` is not callable') @@ -236,135 +261,92 @@ def __init__(self, file_like=None, read_size=0, use_list=True, raise TypeError("`ext_hook` is not callable") def feed(self, next_bytes): - if isinstance(next_bytes, array.array): - next_bytes = next_bytes.tostring() - elif isinstance(next_bytes, bytearray): - next_bytes = bytes(next_bytes) - assert self._fb_feeding - if (self._fb_buf_n + len(next_bytes) - self._fb_sloppiness - > self._max_buffer_size): + assert self._feeding + view = _get_data_from_buffer(next_bytes) + if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size): raise BufferFull - self._fb_buf_n += len(next_bytes) - self._fb_buffers.append(next_bytes) - - def _fb_sloppy_consume(self): - """ Gets rid of some of the used parts of the buffer. """ - if self._fb_buf_i: - for i in xrange(self._fb_buf_i): - self._fb_buf_n -= len(self._fb_buffers[i]) - self._fb_buffers = self._fb_buffers[self._fb_buf_i:] - self._fb_buf_i = 0 - if self._fb_buffers: - self._fb_sloppiness = self._fb_buf_o - else: - self._fb_sloppiness = 0 + self._buffer += view - def _fb_consume(self): + def _consume(self): """ Gets rid of the used parts of the buffer. 
""" - if self._fb_buf_i: - for i in xrange(self._fb_buf_i): - self._fb_buf_n -= len(self._fb_buffers[i]) - self._fb_buffers = self._fb_buffers[self._fb_buf_i:] - self._fb_buf_i = 0 - if self._fb_buffers: - self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:] - self._fb_buf_n -= self._fb_buf_o - else: - self._fb_buf_n = 0 - self._fb_buf_o = 0 - self._fb_sloppiness = 0 - - def _fb_got_extradata(self): - if self._fb_buf_i != len(self._fb_buffers): - return True - if self._fb_feeding: - return False - if not self.file_like: - return False - if self.file_like.read(1): - return True - return False + self._stream_offset += self._buff_i - self._buf_checkpoint + self._buf_checkpoint = self._buff_i - def __iter__(self): - return self + def _got_extradata(self): + return self._buff_i < len(self._buffer) + + def _get_extradata(self): + return self._buffer[self._buff_i:] def read_bytes(self, n): - return self._fb_read(n) - - def _fb_rollback(self): - self._fb_buf_i = 0 - self._fb_buf_o = self._fb_sloppiness - - def _fb_get_extradata(self): - bufs = self._fb_buffers[self._fb_buf_i:] - if bufs: - bufs[0] = bufs[0][self._fb_buf_o:] - return b''.join(bufs) - - def _fb_read(self, n, write_bytes=None): - buffs = self._fb_buffers - # We have a redundant codepath for the most common case, such that - # pypy optimizes it properly. This is the case that the read fits - # in the current buffer. - if (write_bytes is None and self._fb_buf_i < len(buffs) and - self._fb_buf_o + n < len(buffs[self._fb_buf_i])): - self._fb_buf_o += n - return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o] - - # The remaining cases. - ret = b'' - while len(ret) != n: - sliced = n - len(ret) - if self._fb_buf_i == len(buffs): - if self._fb_feeding: - break - to_read = sliced - if self._read_size > to_read: - to_read = self._read_size - tmp = self.file_like.read(to_read) - if not tmp: - break - buffs.append(tmp) - self._fb_buf_n += len(tmp) - continue - ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced] - self._fb_buf_o += sliced - if self._fb_buf_o >= len(buffs[self._fb_buf_i]): - self._fb_buf_o = 0 - self._fb_buf_i += 1 - if len(ret) != n: - self._fb_rollback() + return self._read(n) + + def _read(self, n): + # (int) -> bytearray + self._reserve(n) + i = self._buff_i + self._buff_i = i+n + return self._buffer[i:i+n] + + def _reserve(self, n): + remain_bytes = len(self._buffer) - self._buff_i - n + + # Fast path: buffer has n bytes already + if remain_bytes >= 0: + return + + if self._feeding: + self._buff_i = self._buf_checkpoint + raise OutOfData + + # Strip buffer before checkpoint before reading file. 
+ if self._buf_checkpoint > 0: + del self._buffer[:self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + # Read from file + remain_bytes = -remain_bytes + while remain_bytes > 0: + to_read_bytes = max(self._read_size, remain_bytes) + read_data = self.file_like.read(to_read_bytes) + if not read_data: + break + assert isinstance(read_data, bytes) + self._buffer += read_data + remain_bytes -= len(read_data) + + if len(self._buffer) < n + self._buff_i: + self._buff_i = 0 # rollback raise OutOfData - if write_bytes is not None: - write_bytes(ret) - return ret - def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None): + def _read_header(self, execute=EX_CONSTRUCT): typ = TYPE_IMMEDIATE n = 0 obj = None - c = self._fb_read(1, write_bytes) - b = ord(c) - if b & 0b10000000 == 0: + self._reserve(1) + b = self._buffer[self._buff_i] + self._buff_i += 1 + if b & 0b10000000 == 0: obj = b elif b & 0b11100000 == 0b11100000: - obj = struct.unpack("b", c)[0] + obj = -1 - (b ^ 0xff) elif b & 0b11100000 == 0b10100000: n = b & 0b00011111 - obj = self._fb_read(n, write_bytes) typ = TYPE_RAW if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) elif b & 0b11110000 == 0b10010000: n = b & 0b00001111 typ = TYPE_ARRAY if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b & 0b11110000 == 0b10000000: n = b & 0b00001111 typ = TYPE_MAP if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) elif b == 0xc0: obj = None elif b == 0xc2: @@ -373,129 +355,185 @@ def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None): obj = True elif b == 0xc4: typ = TYPE_BIN - n = struct.unpack("B", self._fb_read(1, write_bytes))[0] + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) elif b == 0xc5: typ = TYPE_BIN - n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] + self._reserve(2) + n = struct.unpack_from(">H", self._buffer, self._buff_i)[0] + self._buff_i += 2 if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) elif b == 0xc6: typ = TYPE_BIN - n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] + self._reserve(4) + n = struct.unpack_from(">I", self._buffer, self._buff_i)[0] + self._buff_i += 4 if n > self._max_bin_len: - raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) elif b == 0xc7: # ext 8 typ = TYPE_EXT - L, n = struct.unpack('Bb', self._fb_read(2, write_bytes)) + self._reserve(2) + L, n = struct.unpack_from('Bb', self._buffer, self._buff_i) + self._buff_i += 2 if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = 
self._fb_read(L, write_bytes) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) elif b == 0xc8: # ext 16 typ = TYPE_EXT - L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes)) + self._reserve(3) + L, n = struct.unpack_from('>Hb', self._buffer, self._buff_i) + self._buff_i += 3 if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._fb_read(L, write_bytes) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) elif b == 0xc9: # ext 32 typ = TYPE_EXT - L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes)) + self._reserve(5) + L, n = struct.unpack_from('>Ib', self._buffer, self._buff_i) + self._buff_i += 5 if L > self._max_ext_len: - raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) - obj = self._fb_read(L, write_bytes) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) elif b == 0xca: - obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0] + self._reserve(4) + obj = struct.unpack_from(">f", self._buffer, self._buff_i)[0] + self._buff_i += 4 elif b == 0xcb: - obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0] + self._reserve(8) + obj = struct.unpack_from(">d", self._buffer, self._buff_i)[0] + self._buff_i += 8 elif b == 0xcc: - obj = struct.unpack("B", self._fb_read(1, write_bytes))[0] + self._reserve(1) + obj = self._buffer[self._buff_i] + self._buff_i += 1 elif b == 0xcd: - obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0] + self._reserve(2) + obj = struct.unpack_from(">H", self._buffer, self._buff_i)[0] + self._buff_i += 2 elif b == 0xce: - obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0] + self._reserve(4) + obj = struct.unpack_from(">I", self._buffer, self._buff_i)[0] + self._buff_i += 4 elif b == 0xcf: - obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0] + self._reserve(8) + obj = struct.unpack_from(">Q", self._buffer, self._buff_i)[0] + self._buff_i += 8 elif b == 0xd0: - obj = struct.unpack("b", self._fb_read(1, write_bytes))[0] + self._reserve(1) + obj = struct.unpack_from("b", self._buffer, self._buff_i)[0] + self._buff_i += 1 elif b == 0xd1: - obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0] + self._reserve(2) + obj = struct.unpack_from(">h", self._buffer, self._buff_i)[0] + self._buff_i += 2 elif b == 0xd2: - obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0] + self._reserve(4) + obj = struct.unpack_from(">i", self._buffer, self._buff_i)[0] + self._buff_i += 4 elif b == 0xd3: - obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0] + self._reserve(8) + obj = struct.unpack_from(">q", self._buffer, self._buff_i)[0] + self._buff_i += 8 elif b == 0xd4: # fixext 1 typ = TYPE_EXT if self._max_ext_len < 1: - raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) - n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes)) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) + self._reserve(2) + n, obj = struct.unpack_from("b1s", self._buffer, self._buff_i) + self._buff_i += 2 elif b == 0xd5: # fixext 2 typ = TYPE_EXT if self._max_ext_len < 2: - raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) - n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes)) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) + self._reserve(3) + n, obj = struct.unpack_from("b2s", self._buffer, self._buff_i) 
+ self._buff_i += 3 elif b == 0xd6: # fixext 4 typ = TYPE_EXT if self._max_ext_len < 4: - raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) - n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes)) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) + self._reserve(5) + n, obj = struct.unpack_from("b4s", self._buffer, self._buff_i) + self._buff_i += 5 elif b == 0xd7: # fixext 8 typ = TYPE_EXT if self._max_ext_len < 8: - raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) - n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes)) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) + self._reserve(9) + n, obj = struct.unpack_from("b8s", self._buffer, self._buff_i) + self._buff_i += 9 elif b == 0xd8: # fixext 16 typ = TYPE_EXT if self._max_ext_len < 16: - raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) - n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes)) + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) + self._reserve(17) + n, obj = struct.unpack_from("b16s", self._buffer, self._buff_i) + self._buff_i += 17 elif b == 0xd9: typ = TYPE_RAW - n = struct.unpack("B", self._fb_read(1, write_bytes))[0] + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) elif b == 0xda: typ = TYPE_RAW - n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) elif b == 0xdb: typ = TYPE_RAW - n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 if n > self._max_str_len: - raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) - obj = self._fb_read(n, write_bytes) + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) elif b == 0xdc: - n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] - if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) typ = TYPE_ARRAY - elif b == 0xdd: - n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 if n > self._max_array_len: - raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b == 0xdd: typ = TYPE_ARRAY + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) elif b == 0xde: - n = struct.unpack(">H", self._fb_read(2, write_bytes))[0] + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise UnpackValueError("%s exceeds 
max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP elif b == 0xdf: - n = struct.unpack(">I", self._fb_read(4, write_bytes))[0] + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 if n > self._max_map_len: - raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) typ = TYPE_MAP else: raise UnpackValueError("Unknown header: 0x%x" % b) return typ, n, obj - def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None): - typ, n, obj = self._read_header(execute, write_bytes) + def _unpack(self, execute=EX_CONSTRUCT): + typ, n, obj = self._read_header(execute) if execute == EX_READ_ARRAY_HEADER: if typ != TYPE_ARRAY: @@ -510,11 +548,11 @@ def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None): if execute == EX_SKIP: for i in xrange(n): # TODO check whether we need to call `list_hook` - self._fb_unpack(EX_SKIP, write_bytes) + self._unpack(EX_SKIP) return ret = newlist_hint(n) for i in xrange(n): - ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes)) + ret.append(self._unpack(EX_CONSTRUCT)) if self._list_hook is not None: ret = self._list_hook(ret) # TODO is the interaction between `list_hook` and `use_list` ok? @@ -523,19 +561,19 @@ def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None): if execute == EX_SKIP: for i in xrange(n): # TODO check whether we need to call hooks - self._fb_unpack(EX_SKIP, write_bytes) - self._fb_unpack(EX_SKIP, write_bytes) + self._unpack(EX_SKIP) + self._unpack(EX_SKIP) return if self._object_pairs_hook is not None: ret = self._object_pairs_hook( - (self._fb_unpack(EX_CONSTRUCT, write_bytes), - self._fb_unpack(EX_CONSTRUCT, write_bytes)) + (self._unpack(EX_CONSTRUCT), + self._unpack(EX_CONSTRUCT)) for _ in xrange(n)) else: ret = {} for _ in xrange(n): - key = self._fb_unpack(EX_CONSTRUCT, write_bytes) - ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes) + key = self._unpack(EX_CONSTRUCT) + ret[key] = self._unpack(EX_CONSTRUCT) if self._object_hook is not None: ret = self._object_hook(ret) return ret @@ -544,43 +582,64 @@ def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None): if typ == TYPE_RAW: if self._encoding is not None: obj = obj.decode(self._encoding, self._unicode_errors) + else: + obj = bytes(obj) return obj if typ == TYPE_EXT: - return self._ext_hook(n, obj) + return self._ext_hook(n, bytes(obj)) if typ == TYPE_BIN: - return obj + return bytes(obj) assert typ == TYPE_IMMEDIATE return obj - def next(self): + def __iter__(self): + return self + + def __next__(self): try: - ret = self._fb_unpack(EX_CONSTRUCT, None) - self._fb_sloppy_consume() + ret = self._unpack(EX_CONSTRUCT) + self._consume() return ret except OutOfData: - self._fb_consume() + self._consume() raise StopIteration - __next__ = next + + next = __next__ def skip(self, write_bytes=None): - self._fb_unpack(EX_SKIP, write_bytes) - self._fb_consume() + self._unpack(EX_SKIP) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() def unpack(self, write_bytes=None): - ret = self._fb_unpack(EX_CONSTRUCT, write_bytes) - self._fb_consume() + ret = self._unpack(EX_CONSTRUCT) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. 
Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
         return ret
 
     def read_array_header(self, write_bytes=None):
-        ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
-        self._fb_consume()
+        ret = self._unpack(EX_READ_ARRAY_HEADER)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
         return ret
 
     def read_map_header(self, write_bytes=None):
-        ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
-        self._fb_consume()
+        ret = self._unpack(EX_READ_MAP_HEADER)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
         return ret
 
+    def tell(self):
+        return self._stream_offset
+
 
 class Packer(object):
     """
@@ -597,21 +656,36 @@ class Packer(object):
     :param callable default:
         Convert user type to builtin type that Packer supports.
        See also simplejson's document.
-    :param str encoding:
-        Convert unicode to bytes with this encoding. (default: 'utf-8')
-    :param str unicode_errors:
-        Error handler for encoding unicode. (default: 'strict')
+
    :param bool use_single_float:
        Use single precision float type for float. (default: False)
+
    :param bool autoreset:
-        Reset buffer after each pack and return it's content as `bytes`. (default: True).
+        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
+
    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
-        It also enable str8 type for unicode.
+        It also enables str8 type for unicode.
+
+    :param bool strict_types:
+        If set to true, types will be checked to be exact. Derived classes
+        from serializable types will not be serialized and will be
+        treated as an unsupported type and forwarded to default.
+        Additionally tuples will not be serialized as lists.
+        This is useful when trying to implement accurate serialization
+        for Python types.
+
+    :param str encoding:
+        (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')
+
+    :param str unicode_errors:
+        (deprecated) Error handler for encoding unicode. 
(default: 'strict') """ def __init__(self, default=None, encoding='utf-8', unicode_errors='strict', - use_single_float=False, autoreset=True, use_bin_type=False): + use_single_float=False, autoreset=True, use_bin_type=False, + strict_types=False): + self._strict_types = strict_types self._use_float = use_single_float self._autoreset = autoreset self._use_bin_type = use_bin_type @@ -623,18 +697,24 @@ def __init__(self, default=None, encoding='utf-8', unicode_errors='strict', raise TypeError("default must be callable") self._default = default - def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance): + def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, + check=isinstance, check_type_strict=_check_type_strict): default_used = False + if self._strict_types: + check = check_type_strict + list_types = list + else: + list_types = (list, tuple) while True: if nest_limit < 0: raise PackValueError("recursion limit exceeded") if obj is None: return self._buffer.write(b"\xc0") - if isinstance(obj, bool): + if check(obj, bool): if obj: return self._buffer.write(b"\xc3") return self._buffer.write(b"\xc2") - if isinstance(obj, int_types): + if check(obj, int_types): if 0 <= obj < 0x80: return self._buffer.write(struct.pack("B", obj)) if -0x20 <= obj < 0: @@ -659,42 +739,35 @@ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance): obj = self._default(obj) default_used = True continue - raise PackValueError("Integer value out of range") - if self._use_bin_type and isinstance(obj, bytes): + raise PackOverflowError("Integer value out of range") + if check(obj, (bytes, bytearray)): n = len(obj) - if n <= 0xff: - self._buffer.write(struct.pack('>BB', 0xc4, n)) - elif n <= 0xffff: - self._buffer.write(struct.pack(">BH", 0xc5, n)) - elif n <= 0xffffffff: - self._buffer.write(struct.pack(">BI", 0xc6, n)) - else: - raise PackValueError("Bytes is too large") + if n >= 2**32: + raise PackValueError("%s is too large" % type(obj).__name__) + self._pack_bin_header(n) return self._buffer.write(obj) - if isinstance(obj, (Unicode, bytes)): - if isinstance(obj, Unicode): - if self._encoding is None: - raise TypeError( - "Can't encode unicode string: " - "no encoding is specified") - obj = obj.encode(self._encoding, self._unicode_errors) + if check(obj, Unicode): + if self._encoding is None: + raise TypeError( + "Can't encode unicode string: " + "no encoding is specified") + obj = obj.encode(self._encoding, self._unicode_errors) n = len(obj) - if n <= 0x1f: - self._buffer.write(struct.pack('B', 0xa0 + n)) - elif self._use_bin_type and n <= 0xff: - self._buffer.write(struct.pack('>BB', 0xd9, n)) - elif n <= 0xffff: - self._buffer.write(struct.pack(">BH", 0xda, n)) - elif n <= 0xffffffff: - self._buffer.write(struct.pack(">BI", 0xdb, n)) - else: + if n >= 2**32: raise PackValueError("String is too large") + self._pack_raw_header(n) + return self._buffer.write(obj) + if check(obj, memoryview): + n = len(obj) * obj.itemsize + if n >= 2**32: + raise PackValueError("Memoryview is too large") + self._pack_bin_header(n) return self._buffer.write(obj) - if isinstance(obj, float): + if check(obj, float): if self._use_float: return self._buffer.write(struct.pack(">Bf", 0xca, obj)) return self._buffer.write(struct.pack(">Bd", 0xcb, obj)) - if isinstance(obj, ExtType): + if check(obj, ExtType): code = obj.code data = obj.data assert isinstance(code, int) @@ -719,20 +792,20 @@ def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance): 
self._buffer.write(struct.pack("b", code)) self._buffer.write(data) return - if isinstance(obj, (list, tuple)): + if check(obj, list_types): n = len(obj) - self._fb_pack_array_header(n) + self._pack_array_header(n) for i in xrange(n): self._pack(obj[i], nest_limit - 1) return - if isinstance(obj, dict): - return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj), + if check(obj, dict): + return self._pack_map_pairs(len(obj), dict_iteritems(obj), nest_limit - 1) if not default_used and self._default is not None: obj = self._default(obj) default_used = 1 continue - raise TypeError("Cannot serialize %r" % obj) + raise TypeError("Cannot serialize %r" % (obj, )) def pack(self, obj): self._pack(obj) @@ -744,7 +817,7 @@ def pack(self, obj): return ret def pack_map_pairs(self, pairs): - self._fb_pack_map_pairs(len(pairs), pairs) + self._pack_map_pairs(len(pairs), pairs) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() @@ -754,8 +827,8 @@ def pack_map_pairs(self, pairs): def pack_array_header(self, n): if n >= 2**32: - raise ValueError - self._fb_pack_array_header(n) + raise PackValueError + self._pack_array_header(n) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() @@ -765,8 +838,8 @@ def pack_array_header(self, n): def pack_map_header(self, n): if n >= 2**32: - raise ValueError - self._fb_pack_map_header(n) + raise PackValueError + self._pack_map_header(n) ret = self._buffer.getvalue() if self._autoreset: self._buffer = StringIO() @@ -783,7 +856,7 @@ def pack_ext_type(self, typecode, data): raise TypeError("data must have bytes type") L = len(data) if L > 0xffffffff: - raise ValueError("Too large data") + raise PackValueError("Too large data") if L == 1: self._buffer.write(b'\xd4') elif L == 2: @@ -803,7 +876,7 @@ def pack_ext_type(self, typecode, data): self._buffer.write(struct.pack('B', typecode)) self._buffer.write(data) - def _fb_pack_array_header(self, n): + def _pack_array_header(self, n): if n <= 0x0f: return self._buffer.write(struct.pack('B', 0x90 + n)) if n <= 0xffff: @@ -812,7 +885,7 @@ def _fb_pack_array_header(self, n): return self._buffer.write(struct.pack(">BI", 0xdd, n)) raise PackValueError("Array is too large") - def _fb_pack_map_header(self, n): + def _pack_map_header(self, n): if n <= 0x0f: return self._buffer.write(struct.pack('B', 0x80 + n)) if n <= 0xffff: @@ -821,12 +894,36 @@ def _fb_pack_map_header(self, n): return self._buffer.write(struct.pack(">BI", 0xdf, n)) raise PackValueError("Dict is too large") - def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): - self._fb_pack_map_header(n) + def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): + self._pack_map_header(n) for (k, v) in pairs: self._pack(k, nest_limit - 1) self._pack(v, nest_limit - 1) + def _pack_raw_header(self, n): + if n <= 0x1f: + self._buffer.write(struct.pack('B', 0xa0 + n)) + elif self._use_bin_type and n <= 0xff: + self._buffer.write(struct.pack('>BB', 0xd9, n)) + elif n <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xda, n)) + elif n <= 0xffffffff: + self._buffer.write(struct.pack(">BI", 0xdb, n)) + else: + raise PackValueError('Raw is too large') + + def _pack_bin_header(self, n): + if not self._use_bin_type: + return self._pack_raw_header(n) + elif n <= 0xff: + return self._buffer.write(struct.pack('>BB', 0xc4, n)) + elif n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xc5, n)) + elif n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xc6, n)) + else: + raise 
PackValueError('Bin is too large') + def bytes(self): return self._buffer.getvalue() diff --git a/ext/pkg_resources/__init__.py b/ext/pkg_resources/__init__.py index 68349df44e..08f9bbe7ef 100644 --- a/ext/pkg_resources/__init__.py +++ b/ext/pkg_resources/__init__.py @@ -480,8 +480,10 @@ def get_build_platform(): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), - _macosx_arch(machine)) + return "macosx-%d.%d-%s" % ( + int(version[0]), int(version[1]), + _macosx_arch(machine), + ) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation @@ -806,7 +808,8 @@ def resolve(self, requirements, env=None, installer=None, already-installed distribution; it should return a ``Distribution`` or ``None``. - Unless `replace_conflicting=True`, raises a VersionConflict exception if + Unless `replace_conflicting=True`, raises a VersionConflict exception + if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate @@ -885,8 +888,8 @@ def resolve(self, requirements, env=None, installer=None, # return list of distros to activate return to_activate - def find_plugins(self, plugin_env, full_env=None, installer=None, - fallback=True): + def find_plugins( + self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: @@ -1040,7 +1043,8 @@ def markers_pass(self, req, extras=None): class Environment(object): """Searchable snapshot of distributions on a search path""" - def __init__(self, search_path=None, platform=get_supported_platform(), + def __init__( + self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path @@ -1113,7 +1117,8 @@ def add(self, dist): dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - def best_match(self, req, working_set, installer=None, replace_conflicting=False): + def best_match( + self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a @@ -1248,8 +1253,8 @@ def extraction_error(self): tmpl = textwrap.dedent(""" Can't extract file(s) to egg cache - The following error occurred while trying to extract file(s) to the Python egg - cache: + The following error occurred while trying to extract file(s) + to the Python egg cache: {old_exc} @@ -1257,9 +1262,9 @@ def extraction_error(self): {cache_path} - Perhaps your account does not have write access to this directory? You can - change the cache directory by setting the PYTHON_EGG_CACHE environment - variable to point to an accessible directory. + Perhaps your account does not have write access to this directory? + You can change the cache directory by setting the PYTHON_EGG_CACHE + environment variable to point to an accessible directory. 
""").lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self @@ -1309,11 +1314,13 @@ def _warn_unsafe_extraction_path(path): return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ("%s is writable by group/others and vulnerable to attack " + msg = ( + "%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." % path) + "PYTHON_EGG_CACHE environment variable)." % path + ) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): @@ -1506,7 +1513,10 @@ def metadata_listdir(self, name): def run_script(self, script_name, namespace): script = 'scripts/' + script_name if not self.has_metadata(script): - raise ResolutionError("No script named %r" % script_name) + raise ResolutionError( + "Script {script!r} not found in metadata at {self.egg_info!r}" + .format(**locals()), + ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) @@ -1597,8 +1607,11 @@ def _get(self, path): @classmethod def _register(cls): - loader_cls = getattr(importlib_machinery, 'SourceFileLoader', - type(None)) + loader_cls = getattr( + importlib_machinery, + 'SourceFileLoader', + type(None), + ) register_loader_type(loader_cls, cls) @@ -1634,7 +1647,7 @@ def build(cls, path): Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ - with ContextualZipFile(path) as zfile: + with zipfile.ZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), @@ -1667,26 +1680,6 @@ def load(self, path): return self[path].manifest -class ContextualZipFile(zipfile.ZipFile): - """ - Supplement ZipFile class to support context manager for Python 2.6 - """ - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def __new__(cls, *args, **kwargs): - """ - Construct a ZipFile or ContextualZipFile as appropriate - """ - if hasattr(zipfile.ZipFile, '__exit__'): - return zipfile.ZipFile(*args, **kwargs) - return super(ContextualZipFile, cls).__new__(cls) - - class ZipProvider(EggProvider): """Resource support for zips and eggs""" @@ -1700,6 +1693,9 @@ def __init__(self, module): def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive + fspath = fspath.rstrip(os.sep) + if fspath == self.loader.archive: + return '' if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( @@ -1766,7 +1762,10 @@ def _extract_resource(self, manager, zip_path): if self._is_current(real_path, zip_path): return real_path - outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) + outf, tmpnam = _mkstemp( + ".$extract", + dir=os.path.dirname(real_path), + ) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) @@ -1884,7 +1883,7 @@ def get_metadata(self, name): return metadata def _warn_on_replacement(self, metadata): - # Python 2.6 and 3.2 compat for: replacement_char = '�' + # Python 2.7 compat for: replacement_char = '�' replacement_char = b'\xef\xbf\xbd'.decode('utf-8') if replacement_char in metadata: tmpl = "{self.path} could not be properly decoded in UTF-8" @@ -1972,7 +1971,8 @@ def find_eggs_in_zip(importer, path_item, 
only=False): for subitem in metadata.resource_listdir('/'): if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) - for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): + dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) + for dist in dists: yield dist elif subitem.lower().endswith('.dist-info'): subpath = os.path.join(path_item, subitem) @@ -1981,7 +1981,6 @@ def find_eggs_in_zip(importer, path_item, only=False): yield Distribution.from_location(path_item, subitem, submeta) - register_finder(zipimport.zipimporter, find_eggs_in_zip) @@ -2124,7 +2123,11 @@ def non_empty_lines(path): """ Yield non-empty lines from file at path """ - return (line.rstrip() for line in open(path) if line.strip()) + with open(path) as f: + for line in f: + line = line.strip() + if line: + yield line def resolve_egg_link(path): @@ -2375,7 +2378,7 @@ def __init__(self, name, module_name, attrs=(), extras=(), dist=None): self.name = name self.module_name = module_name self.attrs = tuple(attrs) - self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras + self.extras = tuple(extras) self.dist = dist def __str__(self): @@ -2523,7 +2526,8 @@ class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' - def __init__(self, location=None, metadata=None, project_name=None, + def __init__( + self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') @@ -2799,7 +2803,8 @@ def insert_on(self, path, loc=None, replace=False): if replace: break else: - # don't modify path (even removing duplicates) if found and not replace + # don't modify path (even removing duplicates) if + # found and not replace return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory @@ -2896,7 +2901,10 @@ def _reload_version(self): class DistInfoDistribution(Distribution): - """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" + """ + Wrap an actual or potential sys.path entry + w/metadata, .dist-info style. + """ PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @@ -2946,7 +2954,7 @@ def reqs_for_extra(extra): '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, - } +} def issue_warning(*args, **kw): @@ -3031,7 +3039,8 @@ def __contains__(self, item): def __hash__(self): return self.__hash - def __repr__(self): return "Requirement.parse(%r)" % str(self) + def __repr__(self): + return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): @@ -3165,7 +3174,10 @@ def _initialize_master_working_set(): dist.activate(replace=False) for dist in working_set ) - add_activation_listener(lambda dist: dist.activate(replace=True), existing=False) + add_activation_listener( + lambda dist: dist.activate(replace=True), + existing=False, + ) working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) diff --git a/ext/pytz/__init__.py b/ext/pytz/__init__.py index 13c83b1137..cb76197716 100644 --- a/ext/pytz/__init__.py +++ b/ext/pytz/__init__.py @@ -9,8 +9,8 @@ ''' # The IANA (nee Olson) database is updated several times a year. -OLSON_VERSION = '2017b' -VERSION = '2017.2' # Switching to pip compatible version numbering. +OLSON_VERSION = '2017c' +VERSION = '2017.3' # Switching to pip compatible version numbering. 
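Most of the pkg_resources hunks above are flake8-driven reflows, but the `non_empty_lines` rewrite also changes behavior: the old one-liner returned a generator over a file object that nothing ever closed, and it only right-stripped lines, while the new generator owns its handle and fully strips them. A small self-contained comparison; the demo filename is made up:

    def non_empty_lines_old(path):
        # Old behavior: the file object opened here is never explicitly
        # closed, and lines are only right-stripped.
        return (line.rstrip() for line in open(path) if line.strip())

    def non_empty_lines_new(path):
        # New behavior: the generator closes the handle when iteration
        # ends, and lines are stripped on both sides.
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line:
                    yield line

    with open('demo.txt', 'w') as f:  # hypothetical demo file
        f.write('first\n\n   \n  second\n')
    assert list(non_empty_lines_old('demo.txt')) == ['first', '  second']
    assert list(non_empty_lines_new('demo.txt')) == ['first', 'second']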
diff --git a/ext/pytz/__init__.py b/ext/pytz/__init__.py
index 13c83b1137..cb76197716 100644
--- a/ext/pytz/__init__.py
+++ b/ext/pytz/__init__.py
@@ -9,8 +9,8 @@
 '''

 # The IANA (nee Olson) database is updated several times a year.
-OLSON_VERSION = '2017b'
-VERSION = '2017.2'  # Switching to pip compatible version numbering.
+OLSON_VERSION = '2017c'
+VERSION = '2017.3'  # Switching to pip compatible version numbering.
 __version__ = VERSION

 OLSEN_VERSION = OLSON_VERSION  # Old releases had this misspelling
@@ -52,8 +52,11 @@ def ascii(s):
         ...
         UnicodeEncodeError: ...
         """
-        s.encode('ASCII')  # Raise an exception if not ASCII
-        return s  # But return the original string - not a byte string.
+        if type(s) == bytes:
+            s = s.decode('ASCII')
+        else:
+            s.encode('ASCII')  # Raise an exception if not ASCII
+        return s  # But return the string - not a byte string.

 else:  # Python 2.x

@@ -76,24 +79,31 @@ def open_resource(name):
     Uses the pkg_resources module if available and no standard file
     found at the calculated location.
+
+    It is possible to specify different location for zoneinfo
+    subdir by using the PYTZ_TZDATADIR environment variable.
     """
     name_parts = name.lstrip('/').split('/')
     for part in name_parts:
         if part == os.path.pardir or os.path.sep in part:
             raise ValueError('Bad path segment: %r' % part)
-    filename = os.path.join(os.path.dirname(__file__),
-                            'zoneinfo', *name_parts)
-    if not os.path.exists(filename):
-        # http://bugs.launchpad.net/bugs/383171 - we avoid using this
-        # unless absolutely necessary to help when a broken version of
-        # pkg_resources is installed.
-        try:
-            from pkg_resources import resource_stream
-        except ImportError:
-            resource_stream = None
+    zoneinfo_dir = os.environ.get('PYTZ_TZDATADIR', None)
+    if zoneinfo_dir != None:
+        filename = os.path.join(zoneinfo_dir, *name_parts)
+    else:
+        filename = os.path.join(os.path.dirname(__file__),
+                                'zoneinfo', *name_parts)
+        if not os.path.exists(filename):
+            # http://bugs.launchpad.net/bugs/383171 - we avoid using this
+            # unless absolutely necessary to help when a broken version of
+            # pkg_resources is installed.
+            try:
+                from pkg_resources import resource_stream
+            except ImportError:
+                resource_stream = None

-        if resource_stream is not None:
-            return resource_stream(__name__, 'zoneinfo/' + name)
+            if resource_stream is not None:
+                return resource_stream(__name__, 'zoneinfo/' + name)

     return open(filename, 'rb')
@@ -865,7 +875,6 @@ def _test():
     'CST6CDT',
     'Canada/Atlantic',
     'Canada/Central',
-    'Canada/East-Saskatchewan',
     'Canada/Eastern',
     'Canada/Mountain',
     'Canada/Newfoundland',
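The new `PYTZ_TZDATADIR` hook above is consulted by `open_resource` on every lookup, so pointing it at an alternative zoneinfo tree before the first timezone is constructed redirects pytz away from its bundled data. A sketch, assuming a system tzdata layout at the illustrative path below:

    import os

    # Illustrative path; it must contain compiled TZif files laid out like
    # pytz's bundled zoneinfo directory (e.g. .../Europe/London).
    os.environ['PYTZ_TZDATADIR'] = '/usr/share/zoneinfo'

    import pytz

    # open_resource() reads PYTZ_TZDATADIR at call time, so this zone is
    # loaded from the directory above rather than from ext/pytz/zoneinfo.
    london = pytz.timezone('Europe/London')
    print(london.zone)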
[GIT binary patch deltas omitted: unreadable binary blobs refreshing the bundled zoneinfo files for tzdata 2017c, among them Africa/Juba, America/Atka, America/Grand_Turk, America/Juneau, America/Metlakatla, America/Nome, Asia/Calcutta, Asia/Famagusta, Asia/Rangoon, Asia/Yangon, Asia/Yerevan, Eire, Europe/Dublin, Pacific/Apia, Pacific/Fiji, Pacific/Midway, US/Alaska, US/Aleutian and US/Samoa, plus further files whose headers are garbled in the source. The patch then starts adding a new plain-text file of compact zic-format rules; its diff header is also garbled, and the recoverable tail of that span reads:]
+R A 1916 1919 - O Sun>=1 23s 0 - +R A 1917 o - Mar 24 23s 1 S +R A 1918 o - Mar 9 23s 1
S +R A 1919 o - Mar 1 23s 1 S +R A 1920 o - F 14 23s 1 S +R A 1920 o - O 23 23s 0 - +R A 1921 o - Mar 14 23s 1 S +R A 1921 o - Jun 21 23s 0 - +R A 1939 o - S 11 23s 1 S +R A 1939 o - N 19 1 0 - +R A 1944 1945 - Ap M>=1 2 1 S +R A 1944 o - O 8 2 0 - +R A 1945 o - S 16 1 0 - +R A 1971 o - Ap 25 23s 1 S +R A 1971 o - S 26 23s 0 - +R A 1977 o - May 6 0 1 S +R A 1977 o - O 21 0 0 - +R A 1978 o - Mar 24 1 1 S +R A 1978 o - S 22 3 0 - +R A 1980 o - Ap 25 0 1 S +R A 1980 o - O 31 2 0 - +Z Africa/Algiers 0:12:12 - LMT 1891 Mar 15 0:1 +0:9:21 - PMT 1911 Mar 11 +0 A WE%sT 1940 F 25 2 +1 A CE%sT 1946 O 7 +0 - WET 1956 Ja 29 +1 - CET 1963 Ap 14 +0 A WE%sT 1977 O 21 +1 A CE%sT 1979 O 26 +0 A WE%sT 1981 May +1 - CET +Z Atlantic/Cape_Verde -1:34:4 - LMT 1907 +-2 - -02 1942 S +-2 1 -01 1945 O 15 +-2 - -02 1975 N 25 2 +-1 - -01 +Z Africa/Ndjamena 1:0:12 - LMT 1912 +1 - WAT 1979 O 14 +1 1 WAST 1980 Mar 8 +1 - WAT +Z Africa/Abidjan -0:16:8 - LMT 1912 +0 - GMT +Li Africa/Abidjan Africa/Bamako +Li Africa/Abidjan Africa/Banjul +Li Africa/Abidjan Africa/Conakry +Li Africa/Abidjan Africa/Dakar +Li Africa/Abidjan Africa/Freetown +Li Africa/Abidjan Africa/Lome +Li Africa/Abidjan Africa/Nouakchott +Li Africa/Abidjan Africa/Ouagadougou +Li Africa/Abidjan Africa/Sao_Tome +Li Africa/Abidjan Atlantic/St_Helena +R B 1940 o - Jul 15 0 1 S +R B 1940 o - O 1 0 0 - +R B 1941 o - Ap 15 0 1 S +R B 1941 o - S 16 0 0 - +R B 1942 1944 - Ap 1 0 1 S +R B 1942 o - O 27 0 0 - +R B 1943 1945 - N 1 0 0 - +R B 1945 o - Ap 16 0 1 S +R B 1957 o - May 10 0 1 S +R B 1957 1958 - O 1 0 0 - +R B 1958 o - May 1 0 1 S +R B 1959 1981 - May 1 1 1 S +R B 1959 1965 - S 30 3 0 - +R B 1966 1994 - O 1 3 0 - +R B 1982 o - Jul 25 1 1 S +R B 1983 o - Jul 12 1 1 S +R B 1984 1988 - May 1 1 1 S +R B 1989 o - May 6 1 1 S +R B 1990 1994 - May 1 1 1 S +R B 1995 2010 - Ap lastF 0s 1 S +R B 1995 2005 - S lastTh 24 0 - +R B 2006 o - S 21 24 0 - +R B 2007 o - S Th>=1 24 0 - +R B 2008 o - Au lastTh 24 0 - +R B 2009 o - Au 20 24 0 - +R B 2010 o - Au 10 24 0 - +R B 2010 o - S 9 24 1 S +R B 2010 o - S lastTh 24 0 - +R B 2014 o - May 15 24 1 S +R B 2014 o - Jun 26 24 0 - +R B 2014 o - Jul 31 24 1 S +R B 2014 o - S lastTh 24 0 - +Z Africa/Cairo 2:5:9 - LMT 1900 O +2 B EE%sT +R C 1920 1942 - S 1 0 0:20 GHST +R C 1920 1942 - D 31 0 0 GMT +Z Africa/Accra -0:0:52 - LMT 1918 +0 C GMT/+0020 +Z Africa/Bissau -1:2:20 - LMT 1912 +-1 - -01 1975 +0 - GMT +Z Africa/Nairobi 2:27:16 - LMT 1928 Jul +3 - EAT 1930 +2:30 - +0230 1940 +2:45 - +0245 1960 +3 - EAT +Li Africa/Nairobi Africa/Addis_Ababa +Li Africa/Nairobi Africa/Asmara +Li Africa/Nairobi Africa/Dar_es_Salaam +Li Africa/Nairobi Africa/Djibouti +Li Africa/Nairobi Africa/Kampala +Li Africa/Nairobi Africa/Mogadishu +Li Africa/Nairobi Indian/Antananarivo +Li Africa/Nairobi Indian/Comoro +Li Africa/Nairobi Indian/Mayotte +Z Africa/Monrovia -0:43:8 - LMT 1882 +-0:43:8 - MMT 1919 Mar +-0:44:30 - MMT 1972 Ja 7 +0 - GMT +R D 1951 o - O 14 2 1 S +R D 1952 o - Ja 1 0 0 - +R D 1953 o - O 9 2 1 S +R D 1954 o - Ja 1 0 0 - +R D 1955 o - S 30 0 1 S +R D 1956 o - Ja 1 0 0 - +R D 1982 1984 - Ap 1 0 1 S +R D 1982 1985 - O 1 0 0 - +R D 1985 o - Ap 6 0 1 S +R D 1986 o - Ap 4 0 1 S +R D 1986 o - O 3 0 0 - +R D 1987 1989 - Ap 1 0 1 S +R D 1987 1989 - O 1 0 0 - +R D 1997 o - Ap 4 0 1 S +R D 1997 o - O 4 0 0 - +R D 2013 o - Mar lastF 1 1 S +R D 2013 o - O lastF 2 0 - +Z Africa/Tripoli 0:52:44 - LMT 1920 +1 D CE%sT 1959 +2 - EET 1982 +1 D CE%sT 1990 May 4 +2 - EET 1996 S 30 +1 D CE%sT 1997 O 4 +2 - EET 2012 N 10 2 +1 D CE%sT 2013 O 25 2 +2 - EET +R E 
1982 o - O 10 0 1 S +R E 1983 o - Mar 21 0 0 - +R E 2008 o - O lastSun 2 1 S +R E 2009 o - Mar lastSun 2 0 - +Z Indian/Mauritius 3:50 - LMT 1907 +4 E +04/+05 +R F 1939 o - S 12 0 1 S +R F 1939 o - N 19 0 0 - +R F 1940 o - F 25 0 1 S +R F 1945 o - N 18 0 0 - +R F 1950 o - Jun 11 0 1 S +R F 1950 o - O 29 0 0 - +R F 1967 o - Jun 3 12 1 S +R F 1967 o - O 1 0 0 - +R F 1974 o - Jun 24 0 1 S +R F 1974 o - S 1 0 0 - +R F 1976 1977 - May 1 0 1 S +R F 1976 o - Au 1 0 0 - +R F 1977 o - S 28 0 0 - +R F 1978 o - Jun 1 0 1 S +R F 1978 o - Au 4 0 0 - +R F 2008 o - Jun 1 0 1 S +R F 2008 o - S 1 0 0 - +R F 2009 o - Jun 1 0 1 S +R F 2009 o - Au 21 0 0 - +R F 2010 o - May 2 0 1 S +R F 2010 o - Au 8 0 0 - +R F 2011 o - Ap 3 0 1 S +R F 2011 o - Jul 31 0 0 - +R F 2012 2013 - Ap lastSun 2 1 S +R F 2012 o - Jul 20 3 0 - +R F 2012 o - Au 20 2 1 S +R F 2012 o - S 30 3 0 - +R F 2013 o - Jul 7 3 0 - +R F 2013 o - Au 10 2 1 S +R F 2013 ma - O lastSun 3 0 - +R F 2014 2021 - Mar lastSun 2 1 S +R F 2014 o - Jun 28 3 0 - +R F 2014 o - Au 2 2 1 S +R F 2015 o - Jun 14 3 0 - +R F 2015 o - Jul 19 2 1 S +R F 2016 o - Jun 5 3 0 - +R F 2016 o - Jul 10 2 1 S +R F 2017 o - May 21 3 0 - +R F 2017 o - Jul 2 2 1 S +R F 2018 o - May 13 3 0 - +R F 2018 o - Jun 17 2 1 S +R F 2019 o - May 5 3 0 - +R F 2019 o - Jun 9 2 1 S +R F 2020 o - Ap 19 3 0 - +R F 2020 o - May 24 2 1 S +R F 2021 o - Ap 11 3 0 - +R F 2021 o - May 16 2 1 S +R F 2022 o - May 8 2 1 S +R F 2023 o - Ap 23 2 1 S +R F 2024 o - Ap 14 2 1 S +R F 2025 o - Ap 6 2 1 S +R F 2026 ma - Mar lastSun 2 1 S +R F 2036 o - O 19 3 0 - +R F 2037 o - O 4 3 0 - +Z Africa/Casablanca -0:30:20 - LMT 1913 O 26 +0 F WE%sT 1984 Mar 16 +1 - CET 1986 +0 F WE%sT +Z Africa/El_Aaiun -0:52:48 - LMT 1934 +-1 - -01 1976 Ap 14 +0 F WE%sT +Z Africa/Maputo 2:10:20 - LMT 1903 Mar +2 - CAT +Li Africa/Maputo Africa/Blantyre +Li Africa/Maputo Africa/Bujumbura +Li Africa/Maputo Africa/Gaborone +Li Africa/Maputo Africa/Harare +Li Africa/Maputo Africa/Kigali +Li Africa/Maputo Africa/Lubumbashi +Li Africa/Maputo Africa/Lusaka +R G 1994 o - Mar 21 0 0 - +R G 1994 2016 - S Sun>=1 2 1 S +R G 1995 2017 - Ap Sun>=1 2 0 - +Z Africa/Windhoek 1:8:24 - LMT 1892 F 8 +1:30 - +0130 1903 Mar +2 - SAST 1942 S 20 2 +2 1 SAST 1943 Mar 21 2 +2 - SAST 1990 Mar 21 +2 - CAT 1994 Mar 21 +1 G WA%sT 2017 S 3 2 +2 - CAT +Z Africa/Lagos 0:13:36 - LMT 1919 S +1 - WAT +Li Africa/Lagos Africa/Bangui +Li Africa/Lagos Africa/Brazzaville +Li Africa/Lagos Africa/Douala +Li Africa/Lagos Africa/Kinshasa +Li Africa/Lagos Africa/Libreville +Li Africa/Lagos Africa/Luanda +Li Africa/Lagos Africa/Malabo +Li Africa/Lagos Africa/Niamey +Li Africa/Lagos Africa/Porto-Novo +Z Indian/Reunion 3:41:52 - LMT 1911 Jun +4 - +04 +Z Indian/Mahe 3:41:48 - LMT 1906 Jun +4 - +04 +R H 1942 1943 - S Sun>=15 2 1 - +R H 1943 1944 - Mar Sun>=15 2 0 - +Z Africa/Johannesburg 1:52 - LMT 1892 F 8 +1:30 - SAST 1903 Mar +2 H SAST +Li Africa/Johannesburg Africa/Maseru +Li Africa/Johannesburg Africa/Mbabane +R I 1970 o - May 1 0 1 S +R I 1970 1985 - O 15 0 0 - +R I 1971 o - Ap 30 0 1 S +R I 1972 1985 - Ap lastSun 0 1 S +Z Africa/Khartoum 2:10:8 - LMT 1931 +2 I CA%sT 2000 Ja 15 12 +3 - EAT 2017 N +2 - CAT +Z Africa/Juba 2:6:28 - LMT 1931 +2 I CA%sT 2000 Ja 15 12 +3 - EAT +R J 1939 o - Ap 15 23s 1 S +R J 1939 o - N 18 23s 0 - +R J 1940 o - F 25 23s 1 S +R J 1941 o - O 6 0 0 - +R J 1942 o - Mar 9 0 1 S +R J 1942 o - N 2 3 0 - +R J 1943 o - Mar 29 2 1 S +R J 1943 o - Ap 17 2 0 - +R J 1943 o - Ap 25 2 1 S +R J 1943 o - O 4 2 0 - +R J 1944 1945 - Ap M>=1 2 1 S +R J 1944 o - O 8 0 0 - +R J 
1945 o - S 16 0 0 - +R J 1977 o - Ap 30 0s 1 S +R J 1977 o - S 24 0s 0 - +R J 1978 o - May 1 0s 1 S +R J 1978 o - O 1 0s 0 - +R J 1988 o - Jun 1 0s 1 S +R J 1988 1990 - S lastSun 0s 0 - +R J 1989 o - Mar 26 0s 1 S +R J 1990 o - May 1 0s 1 S +R J 2005 o - May 1 0s 1 S +R J 2005 o - S 30 1s 0 - +R J 2006 2008 - Mar lastSun 2s 1 S +R J 2006 2008 - O lastSun 2s 0 - +Z Africa/Tunis 0:40:44 - LMT 1881 May 12 +0:9:21 - PMT 1911 Mar 11 +1 J CE%sT +Z Antarctica/Casey 0 - -00 1969 +8 - +08 2009 O 18 2 +11 - +11 2010 Mar 5 2 +8 - +08 2011 O 28 2 +11 - +11 2012 F 21 17u +8 - +08 2016 O 22 +11 - +11 +Z Antarctica/Davis 0 - -00 1957 Ja 13 +7 - +07 1964 N +0 - -00 1969 F +7 - +07 2009 O 18 2 +5 - +05 2010 Mar 10 20u +7 - +07 2011 O 28 2 +5 - +05 2012 F 21 20u +7 - +07 +Z Antarctica/Mawson 0 - -00 1954 F 13 +6 - +06 2009 O 18 2 +5 - +05 +Z Indian/Kerguelen 0 - -00 1950 +5 - +05 +Z Antarctica/DumontDUrville 0 - -00 1947 +10 - +10 1952 Ja 14 +0 - -00 1956 N +10 - +10 +Z Antarctica/Syowa 0 - -00 1957 Ja 29 +3 - +03 +R K 2005 ma - Mar lastSun 1u 2 +02 +R K 2004 ma - O lastSun 1u 0 +00 +Z Antarctica/Troll 0 - -00 2005 F 12 +0 K %s +Z Antarctica/Vostok 0 - -00 1957 D 16 +6 - +06 +Z Antarctica/Rothera 0 - -00 1976 D +-3 - -03 +Z Asia/Kabul 4:36:48 - LMT 1890 +4 - +04 1945 +4:30 - +0430 +R L 2011 o - Mar lastSun 2s 1 S +R L 2011 o - O lastSun 2s 0 - +Z Asia/Yerevan 2:58 - LMT 1924 May 2 +3 - +03 1957 Mar +4 M +04/+05 1991 Mar 31 2s +3 M +03/+04 1995 S 24 2s +4 - +04 1997 +4 M +04/+05 2011 +4 L +04/+05 +R N 1997 2015 - Mar lastSun 4 1 S +R N 1997 2015 - O lastSun 5 0 - +Z Asia/Baku 3:19:24 - LMT 1924 May 2 +3 - +03 1957 Mar +4 M +04/+05 1991 Mar 31 2s +3 M +03/+04 1992 S lastSun 2s +4 - +04 1996 +4 O +04/+05 1997 +4 N +04/+05 +R P 2009 o - Jun 19 23 1 S +R P 2009 o - D 31 24 0 - +Z Asia/Dhaka 6:1:40 - LMT 1890 +5:53:20 - HMT 1941 O +6:30 - +0630 1942 May 15 +5:30 - +0530 1942 S +6:30 - +0630 1951 S 30 +6 - +06 2009 +6 P +06/+07 +Z Asia/Thimphu 5:58:36 - LMT 1947 Au 15 +5:30 - +0530 1987 O +6 - +06 +Z Indian/Chagos 4:49:40 - LMT 1907 +5 - +05 1996 +6 - +06 +Z Asia/Brunei 7:39:40 - LMT 1926 Mar +7:30 - +0730 1933 +8 - +08 +Z Asia/Yangon 6:24:47 - LMT 1880 +6:24:47 - RMT 1920 +6:30 - +0630 1942 May +9 - +09 1945 May 3 +6:30 - +0630 +R Q 1940 o - Jun 3 0 1 D +R Q 1940 1941 - O 1 0 0 S +R Q 1941 o - Mar 16 0 1 D +R R 1986 o - May 4 0 1 D +R R 1986 1991 - S Sun>=11 0 0 S +R R 1987 1991 - Ap Sun>=10 0 1 D +Z Asia/Shanghai 8:5:43 - LMT 1901 +8 Q C%sT 1949 +8 R C%sT +Z Asia/Urumqi 5:50:20 - LMT 1928 +6 - +06 +R S 1941 o - Ap 1 3:30 1 S +R S 1941 o - S 30 3:30 0 - +R S 1946 o - Ap 20 3:30 1 S +R S 1946 o - D 1 3:30 0 - +R S 1947 o - Ap 13 3:30 1 S +R S 1947 o - D 30 3:30 0 - +R S 1948 o - May 2 3:30 1 S +R S 1948 1951 - O lastSun 3:30 0 - +R S 1952 o - O 25 3:30 0 - +R S 1949 1953 - Ap Sun>=1 3:30 1 S +R S 1953 o - N 1 3:30 0 - +R S 1954 1964 - Mar Sun>=18 3:30 1 S +R S 1954 o - O 31 3:30 0 - +R S 1955 1964 - N Sun>=1 3:30 0 - +R S 1965 1976 - Ap Sun>=16 3:30 1 S +R S 1965 1976 - O Sun>=16 3:30 0 - +R S 1973 o - D 30 3:30 1 S +R S 1979 o - May Sun>=8 3:30 1 S +R S 1979 o - O Sun>=16 3:30 0 - +Z Asia/Hong_Kong 7:36:42 - LMT 1904 O 30 +8 S HK%sT 1941 D 25 +9 - JST 1945 S 15 +8 S HK%sT +R T 1946 o - May 15 0 1 D +R T 1946 o - O 1 0 0 S +R T 1947 o - Ap 15 0 1 D +R T 1947 o - N 1 0 0 S +R T 1948 1951 - May 1 0 1 D +R T 1948 1951 - O 1 0 0 S +R T 1952 o - Mar 1 0 1 D +R T 1952 1954 - N 1 0 0 S +R T 1953 1959 - Ap 1 0 1 D +R T 1955 1961 - O 1 0 0 S +R T 1960 1961 - Jun 1 0 1 D +R T 1974 1975 - Ap 1 0 1 D +R T 1974 1975 - O 1 0 0 
S +R T 1979 o - Jul 1 0 1 D +R T 1979 o - O 1 0 0 S +Z Asia/Taipei 8:6 - LMT 1896 +8 - CST 1937 O +9 - JST 1945 S 21 1 +8 T C%sT +R U 1961 1962 - Mar Sun>=16 3:30 1 D +R U 1961 1964 - N Sun>=1 3:30 0 S +R U 1963 o - Mar Sun>=16 0 1 D +R U 1964 o - Mar Sun>=16 3:30 1 D +R U 1965 o - Mar Sun>=16 0 1 D +R U 1965 o - O 31 0 0 S +R U 1966 1971 - Ap Sun>=16 3:30 1 D +R U 1966 1971 - O Sun>=16 3:30 0 S +R U 1972 1974 - Ap Sun>=15 0 1 D +R U 1972 1973 - O Sun>=15 0 0 S +R U 1974 1977 - O Sun>=15 3:30 0 S +R U 1975 1977 - Ap Sun>=15 3:30 1 D +R U 1978 1980 - Ap Sun>=15 0 1 D +R U 1978 1980 - O Sun>=15 0 0 S +Z Asia/Macau 7:34:20 - LMT 1912 +8 U C%sT +R V 1975 o - Ap 13 0 1 S +R V 1975 o - O 12 0 0 - +R V 1976 o - May 15 0 1 S +R V 1976 o - O 11 0 0 - +R V 1977 1980 - Ap Sun>=1 0 1 S +R V 1977 o - S 25 0 0 - +R V 1978 o - O 2 0 0 - +R V 1979 1997 - S lastSun 0 0 - +R V 1981 1998 - Mar lastSun 0 1 S +Z Asia/Nicosia 2:13:28 - LMT 1921 N 14 +2 V EE%sT 1998 S +2 O EE%sT +Z Asia/Famagusta 2:15:48 - LMT 1921 N 14 +2 V EE%sT 1998 S +2 O EE%sT 2016 S 8 +3 - +03 2017 O 29 1u +2 O EE%sT +Li Asia/Nicosia Europe/Nicosia +Z Asia/Tbilisi 2:59:11 - LMT 1880 +2:59:11 - TBMT 1924 May 2 +3 - +03 1957 Mar +4 M +04/+05 1991 Mar 31 2s +3 M +03/+04 1992 +3 W +03/+04 1994 S lastSun +4 W +04/+05 1996 O lastSun +4 1 +05 1997 Mar lastSun +4 W +04/+05 2004 Jun 27 +3 M +03/+04 2005 Mar lastSun 2 +4 - +04 +Z Asia/Dili 8:22:20 - LMT 1912 +8 - +08 1942 F 21 23 +9 - +09 1976 May 3 +8 - +08 2000 S 17 +9 - +09 +Z Asia/Kolkata 5:53:28 - LMT 1854 Jun 28 +5:53:20 - HMT 1870 +5:21:10 - MMT 1906 +5:30 - IST 1941 O +5:30 1 +0630 1942 May 15 +5:30 - IST 1942 S +5:30 1 +0630 1945 O 15 +5:30 - IST +Z Asia/Jakarta 7:7:12 - LMT 1867 Au 10 +7:7:12 - BMT 1923 D 31 23:47:12 +7:20 - +0720 1932 N +7:30 - +0730 1942 Mar 23 +9 - +09 1945 S 23 +7:30 - +0730 1948 May +8 - +08 1950 May +7:30 - +0730 1964 +7 - WIB +Z Asia/Pontianak 7:17:20 - LMT 1908 May +7:17:20 - PMT 1932 N +7:30 - +0730 1942 Ja 29 +9 - +09 1945 S 23 +7:30 - +0730 1948 May +8 - +08 1950 May +7:30 - +0730 1964 +8 - WITA 1988 +7 - WIB +Z Asia/Makassar 7:57:36 - LMT 1920 +7:57:36 - MMT 1932 N +8 - +08 1942 F 9 +9 - +09 1945 S 23 +8 - WITA +Z Asia/Jayapura 9:22:48 - LMT 1932 N +9 - +09 1944 S +9:30 - +0930 1964 +9 - WIT +R X 1978 1980 - Mar 21 0 1 D +R X 1978 o - O 21 0 0 S +R X 1979 o - S 19 0 0 S +R X 1980 o - S 23 0 0 S +R X 1991 o - May 3 0 1 D +R X 1992 1995 - Mar 22 0 1 D +R X 1991 1995 - S 22 0 0 S +R X 1996 o - Mar 21 0 1 D +R X 1996 o - S 21 0 0 S +R X 1997 1999 - Mar 22 0 1 D +R X 1997 1999 - S 22 0 0 S +R X 2000 o - Mar 21 0 1 D +R X 2000 o - S 21 0 0 S +R X 2001 2003 - Mar 22 0 1 D +R X 2001 2003 - S 22 0 0 S +R X 2004 o - Mar 21 0 1 D +R X 2004 o - S 21 0 0 S +R X 2005 o - Mar 22 0 1 D +R X 2005 o - S 22 0 0 S +R X 2008 o - Mar 21 0 1 D +R X 2008 o - S 21 0 0 S +R X 2009 2011 - Mar 22 0 1 D +R X 2009 2011 - S 22 0 0 S +R X 2012 o - Mar 21 0 1 D +R X 2012 o - S 21 0 0 S +R X 2013 2015 - Mar 22 0 1 D +R X 2013 2015 - S 22 0 0 S +R X 2016 o - Mar 21 0 1 D +R X 2016 o - S 21 0 0 S +R X 2017 2019 - Mar 22 0 1 D +R X 2017 2019 - S 22 0 0 S +R X 2020 o - Mar 21 0 1 D +R X 2020 o - S 21 0 0 S +R X 2021 2023 - Mar 22 0 1 D +R X 2021 2023 - S 22 0 0 S +R X 2024 o - Mar 21 0 1 D +R X 2024 o - S 21 0 0 S +R X 2025 2027 - Mar 22 0 1 D +R X 2025 2027 - S 22 0 0 S +R X 2028 2029 - Mar 21 0 1 D +R X 2028 2029 - S 21 0 0 S +R X 2030 2031 - Mar 22 0 1 D +R X 2030 2031 - S 22 0 0 S +R X 2032 2033 - Mar 21 0 1 D +R X 2032 2033 - S 21 0 0 S +R X 2034 2035 - Mar 22 0 1 D +R X 2034 2035 - S 22 0 0 S 
+R X 2036 ma - Mar 21 0 1 D +R X 2036 ma - S 21 0 0 S +Z Asia/Tehran 3:25:44 - LMT 1916 +3:25:44 - TMT 1946 +3:30 - +0330 1977 N +4 X +04/+05 1979 +3:30 X +0330/+0430 +R Y 1982 o - May 1 0 1 D +R Y 1982 1984 - O 1 0 0 S +R Y 1983 o - Mar 31 0 1 D +R Y 1984 1985 - Ap 1 0 1 D +R Y 1985 1990 - S lastSun 1s 0 S +R Y 1986 1990 - Mar lastSun 1s 1 D +R Y 1991 2007 - Ap 1 3s 1 D +R Y 1991 2007 - O 1 3s 0 S +Z Asia/Baghdad 2:57:40 - LMT 1890 +2:57:36 - BMT 1918 +3 - +03 1982 May +3 Y +03/+04 +R Z 1940 o - Jun 1 0 1 D +R Z 1942 1944 - N 1 0 0 S +R Z 1943 o - Ap 1 2 1 D +R Z 1944 o - Ap 1 0 1 D +R Z 1945 o - Ap 16 0 1 D +R Z 1945 o - N 1 2 0 S +R Z 1946 o - Ap 16 2 1 D +R Z 1946 o - N 1 0 0 S +R Z 1948 o - May 23 0 2 DD +R Z 1948 o - S 1 0 1 D +R Z 1948 1949 - N 1 2 0 S +R Z 1949 o - May 1 0 1 D +R Z 1950 o - Ap 16 0 1 D +R Z 1950 o - S 15 3 0 S +R Z 1951 o - Ap 1 0 1 D +R Z 1951 o - N 11 3 0 S +R Z 1952 o - Ap 20 2 1 D +R Z 1952 o - O 19 3 0 S +R Z 1953 o - Ap 12 2 1 D +R Z 1953 o - S 13 3 0 S +R Z 1954 o - Jun 13 0 1 D +R Z 1954 o - S 12 0 0 S +R Z 1955 o - Jun 11 2 1 D +R Z 1955 o - S 11 0 0 S +R Z 1956 o - Jun 3 0 1 D +R Z 1956 o - S 30 3 0 S +R Z 1957 o - Ap 29 2 1 D +R Z 1957 o - S 22 0 0 S +R Z 1974 o - Jul 7 0 1 D +R Z 1974 o - O 13 0 0 S +R Z 1975 o - Ap 20 0 1 D +R Z 1975 o - Au 31 0 0 S +R Z 1985 o - Ap 14 0 1 D +R Z 1985 o - S 15 0 0 S +R Z 1986 o - May 18 0 1 D +R Z 1986 o - S 7 0 0 S +R Z 1987 o - Ap 15 0 1 D +R Z 1987 o - S 13 0 0 S +R Z 1988 o - Ap 10 0 1 D +R Z 1988 o - S 4 0 0 S +R Z 1989 o - Ap 30 0 1 D +R Z 1989 o - S 3 0 0 S +R Z 1990 o - Mar 25 0 1 D +R Z 1990 o - Au 26 0 0 S +R Z 1991 o - Mar 24 0 1 D +R Z 1991 o - S 1 0 0 S +R Z 1992 o - Mar 29 0 1 D +R Z 1992 o - S 6 0 0 S +R Z 1993 o - Ap 2 0 1 D +R Z 1993 o - S 5 0 0 S +R Z 1994 o - Ap 1 0 1 D +R Z 1994 o - Au 28 0 0 S +R Z 1995 o - Mar 31 0 1 D +R Z 1995 o - S 3 0 0 S +R Z 1996 o - Mar 15 0 1 D +R Z 1996 o - S 16 0 0 S +R Z 1997 o - Mar 21 0 1 D +R Z 1997 o - S 14 0 0 S +R Z 1998 o - Mar 20 0 1 D +R Z 1998 o - S 6 0 0 S +R Z 1999 o - Ap 2 2 1 D +R Z 1999 o - S 3 2 0 S +R Z 2000 o - Ap 14 2 1 D +R Z 2000 o - O 6 1 0 S +R Z 2001 o - Ap 9 1 1 D +R Z 2001 o - S 24 1 0 S +R Z 2002 o - Mar 29 1 1 D +R Z 2002 o - O 7 1 0 S +R Z 2003 o - Mar 28 1 1 D +R Z 2003 o - O 3 1 0 S +R Z 2004 o - Ap 7 1 1 D +R Z 2004 o - S 22 1 0 S +R Z 2005 o - Ap 1 2 1 D +R Z 2005 o - O 9 2 0 S +R Z 2006 2010 - Mar F>=26 2 1 D +R Z 2006 o - O 1 2 0 S +R Z 2007 o - S 16 2 0 S +R Z 2008 o - O 5 2 0 S +R Z 2009 o - S 27 2 0 S +R Z 2010 o - S 12 2 0 S +R Z 2011 o - Ap 1 2 1 D +R Z 2011 o - O 2 2 0 S +R Z 2012 o - Mar F>=26 2 1 D +R Z 2012 o - S 23 2 0 S +R Z 2013 ma - Mar F>=23 2 1 D +R Z 2013 ma - O lastSun 2 0 S +Z Asia/Jerusalem 2:20:54 - LMT 1880 +2:20:40 - JMT 1918 +2 Z I%sT +R a 1948 o - May Sun>=1 2 1 D +R a 1948 1951 - S Sat>=8 2 0 S +R a 1949 o - Ap Sun>=1 2 1 D +R a 1950 1951 - May Sun>=1 2 1 D +Z Asia/Tokyo 9:18:59 - LMT 1887 D 31 15u +9 a J%sT +R b 1973 o - Jun 6 0 1 S +R b 1973 1975 - O 1 0 0 - +R b 1974 1977 - May 1 0 1 S +R b 1976 o - N 1 0 0 - +R b 1977 o - O 1 0 0 - +R b 1978 o - Ap 30 0 1 S +R b 1978 o - S 30 0 0 - +R b 1985 o - Ap 1 0 1 S +R b 1985 o - O 1 0 0 - +R b 1986 1988 - Ap F>=1 0 1 S +R b 1986 1990 - O F>=1 0 0 - +R b 1989 o - May 8 0 1 S +R b 1990 o - Ap 27 0 1 S +R b 1991 o - Ap 17 0 1 S +R b 1991 o - S 27 0 0 - +R b 1992 o - Ap 10 0 1 S +R b 1992 1993 - O F>=1 0 0 - +R b 1993 1998 - Ap F>=1 0 1 S +R b 1994 o - S F>=15 0 0 - +R b 1995 1998 - S F>=15 0s 0 - +R b 1999 o - Jul 1 0s 1 S +R b 1999 2002 - S lastF 0s 0 - +R b 2000 2001 
- Mar lastTh 0s 1 S +R b 2002 2012 - Mar lastTh 24 1 S +R b 2003 o - O 24 0s 0 - +R b 2004 o - O 15 0s 0 - +R b 2005 o - S lastF 0s 0 - +R b 2006 2011 - O lastF 0s 0 - +R b 2013 o - D 20 0 0 - +R b 2014 ma - Mar lastTh 24 1 S +R b 2014 ma - O lastF 0s 0 - +Z Asia/Amman 2:23:44 - LMT 1931 +2 b EE%sT +Z Asia/Almaty 5:7:48 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 M +06/+07 1991 Mar 31 2s +5 M +05/+06 1992 Ja 19 2s +6 M +06/+07 2004 O 31 2s +6 - +06 +Z Asia/Qyzylorda 4:21:52 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 M +05/+06 1991 Mar 31 2s +4 M +04/+05 1991 S 29 2s +5 M +05/+06 1992 Ja 19 2s +6 M +06/+07 1992 Mar 29 2s +5 M +05/+06 2004 O 31 2s +6 - +06 +Z Asia/Aqtobe 3:48:40 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 M +05/+06 1991 Mar 31 2s +4 M +04/+05 1992 Ja 19 2s +5 M +05/+06 2004 O 31 2s +5 - +05 +Z Asia/Aqtau 3:21:4 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 O +6 - +06 1982 Ap +5 M +05/+06 1991 Mar 31 2s +4 M +04/+05 1992 Ja 19 2s +5 M +05/+06 1994 S 25 2s +4 M +04/+05 2004 O 31 2s +5 - +05 +Z Asia/Atyrau 3:27:44 - LMT 1924 May 2 +3 - +03 1930 Jun 21 +5 - +05 1981 O +6 - +06 1982 Ap +5 M +05/+06 1991 Mar 31 2s +4 M +04/+05 1992 Ja 19 2s +5 M +05/+06 1999 Mar 28 2s +4 M +04/+05 2004 O 31 2s +5 - +05 +Z Asia/Oral 3:25:24 - LMT 1924 May 2 +3 - +03 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 M +05/+06 1989 Mar 26 2s +4 M +04/+05 1992 Ja 19 2s +5 M +05/+06 1992 Mar 29 2s +4 M +04/+05 2004 O 31 2s +5 - +05 +R c 1992 1996 - Ap Sun>=7 0s 1 S +R c 1992 1996 - S lastSun 0 0 - +R c 1997 2005 - Mar lastSun 2:30 1 S +R c 1997 2004 - O lastSun 2:30 0 - +Z Asia/Bishkek 4:58:24 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 M +06/+07 1991 Mar 31 2s +5 M +05/+06 1991 Au 31 2 +5 c +05/+06 2005 Au 12 +6 - +06 +R d 1948 o - Jun 1 0 1 D +R d 1948 o - S 13 0 0 S +R d 1949 o - Ap 3 0 1 D +R d 1949 1951 - S Sun>=8 0 0 S +R d 1950 o - Ap 1 0 1 D +R d 1951 o - May 6 0 1 D +R d 1955 o - May 5 0 1 D +R d 1955 o - S 9 0 0 S +R d 1956 o - May 20 0 1 D +R d 1956 o - S 30 0 0 S +R d 1957 1960 - May Sun>=1 0 1 D +R d 1957 1960 - S Sun>=18 0 0 S +R d 1987 1988 - May Sun>=8 2 1 D +R d 1987 1988 - O Sun>=8 3 0 S +Z Asia/Seoul 8:27:52 - LMT 1908 Ap +8:30 - KST 1912 +9 - JST 1945 S 8 +9 - KST 1954 Mar 21 +8:30 d K%sT 1961 Au 10 +9 d K%sT +Z Asia/Pyongyang 8:23 - LMT 1908 Ap +8:30 - KST 1912 +9 - JST 1945 Au 24 +9 - KST 2015 Au 15 +8:30 - KST +R e 1920 o - Mar 28 0 1 S +R e 1920 o - O 25 0 0 - +R e 1921 o - Ap 3 0 1 S +R e 1921 o - O 3 0 0 - +R e 1922 o - Mar 26 0 1 S +R e 1922 o - O 8 0 0 - +R e 1923 o - Ap 22 0 1 S +R e 1923 o - S 16 0 0 - +R e 1957 1961 - May 1 0 1 S +R e 1957 1961 - O 1 0 0 - +R e 1972 o - Jun 22 0 1 S +R e 1972 1977 - O 1 0 0 - +R e 1973 1977 - May 1 0 1 S +R e 1978 o - Ap 30 0 1 S +R e 1978 o - S 30 0 0 - +R e 1984 1987 - May 1 0 1 S +R e 1984 1991 - O 16 0 0 - +R e 1988 o - Jun 1 0 1 S +R e 1989 o - May 10 0 1 S +R e 1990 1992 - May 1 0 1 S +R e 1992 o - O 4 0 0 - +R e 1993 ma - Mar lastSun 0 1 S +R e 1993 1998 - S lastSun 0 0 - +R e 1999 ma - O lastSun 0 0 - +Z Asia/Beirut 2:22 - LMT 1880 +2 e EE%sT +R f 1935 1941 - S 14 0 0:20 TS +R f 1935 1941 - D 14 0 0 - +Z Asia/Kuala_Lumpur 6:46:46 - LMT 1901 +6:55:25 - SMT 1905 Jun +7 - +07 1933 +7 0:20 +0720 1936 +7:20 - +0720 1941 S +7:30 - +0730 1942 F 16 +9 - +09 1945 S 12 +7:30 - +0730 1982 +8 - +08 +Z Asia/Kuching 7:21:20 - LMT 1926 Mar +7:30 - +0730 1933 +8 f +08/+0820 1942 F 16 +9 - +09 1945 S 12 +8 - +08 +Z Indian/Maldives 4:54 - 
LMT 1880 +4:54 - MMT 1960 +5 - +05 +R g 1983 1984 - Ap 1 0 1 S +R g 1983 o - O 1 0 0 - +R g 1985 1998 - Mar lastSun 0 1 S +R g 1984 1998 - S lastSun 0 0 - +R g 2001 o - Ap lastSat 2 1 S +R g 2001 2006 - S lastSat 2 0 - +R g 2002 2006 - Mar lastSat 2 1 S +R g 2015 2016 - Mar lastSat 2 1 S +R g 2015 2016 - S lastSat 0 0 - +Z Asia/Hovd 6:6:36 - LMT 1905 Au +6 - +06 1978 +7 g +07/+08 +Z Asia/Ulaanbaatar 7:7:32 - LMT 1905 Au +7 - +07 1978 +8 g +08/+09 +Z Asia/Choibalsan 7:38 - LMT 1905 Au +7 - +07 1978 +8 - +08 1983 Ap +9 g +09/+10 2008 Mar 31 +8 g +08/+09 +Z Asia/Kathmandu 5:41:16 - LMT 1920 +5:30 - +0530 1986 +5:45 - +0545 +R h 2002 o - Ap Sun>=2 0 1 S +R h 2002 o - O Sun>=2 0 0 - +R h 2008 o - Jun 1 0 1 S +R h 2008 2009 - N 1 0 0 - +R h 2009 o - Ap 15 0 1 S +Z Asia/Karachi 4:28:12 - LMT 1907 +5:30 - +0530 1942 S +5:30 1 +0630 1945 O 15 +5:30 - +0530 1951 S 30 +5 - +05 1971 Mar 26 +5 h PK%sT +R i 1999 2005 - Ap F>=15 0 1 S +R i 1999 2003 - O F>=15 0 0 - +R i 2004 o - O 1 1 0 - +R i 2005 o - O 4 2 0 - +R i 2006 2007 - Ap 1 0 1 S +R i 2006 o - S 22 0 0 - +R i 2007 o - S Th>=8 2 0 - +R i 2008 2009 - Mar lastF 0 1 S +R i 2008 o - S 1 0 0 - +R i 2009 o - S F>=1 1 0 - +R i 2010 o - Mar 26 0 1 S +R i 2010 o - Au 11 0 0 - +R i 2011 o - Ap 1 0:1 1 S +R i 2011 o - Au 1 0 0 - +R i 2011 o - Au 30 0 1 S +R i 2011 o - S 30 0 0 - +R i 2012 2014 - Mar lastTh 24 1 S +R i 2012 o - S 21 1 0 - +R i 2013 o - S F>=21 0 0 - +R i 2014 2015 - O F>=21 0 0 - +R i 2015 o - Mar lastF 24 1 S +R i 2016 ma - Mar lastSat 1 1 S +R i 2016 ma - O lastSat 1 0 - +Z Asia/Gaza 2:17:52 - LMT 1900 O +2 Z EET/EEST 1948 May 15 +2 B EE%sT 1967 Jun 5 +2 Z I%sT 1996 +2 b EE%sT 1999 +2 i EE%sT 2008 Au 29 +2 - EET 2008 S +2 i EE%sT 2010 +2 - EET 2010 Mar 27 0:1 +2 i EE%sT 2011 Au +2 - EET 2012 +2 i EE%sT +Z Asia/Hebron 2:20:23 - LMT 1900 O +2 Z EET/EEST 1948 May 15 +2 B EE%sT 1967 Jun 5 +2 Z I%sT 1996 +2 b EE%sT 1999 +2 i EE%sT +R j 1936 o - N 1 0 1 S +R j 1937 o - F 1 0 0 - +R j 1954 o - Ap 12 0 1 S +R j 1954 o - Jul 1 0 0 - +R j 1978 o - Mar 22 0 1 S +R j 1978 o - S 21 0 0 - +Z Asia/Manila -15:56 - LMT 1844 D 31 +8:4 - LMT 1899 May 11 +8 j +08/+09 1942 May +9 - +09 1944 N +8 j +08/+09 +Z Asia/Qatar 3:26:8 - LMT 1920 +4 - +04 1972 Jun +3 - +03 +Li Asia/Qatar Asia/Bahrain +Z Asia/Riyadh 3:6:52 - LMT 1947 Mar 14 +3 - +03 +Li Asia/Riyadh Asia/Aden +Li Asia/Riyadh Asia/Kuwait +Z Asia/Singapore 6:55:25 - LMT 1901 +6:55:25 - SMT 1905 Jun +7 - +07 1933 +7 0:20 +0720 1936 +7:20 - +0720 1941 S +7:30 - +0730 1942 F 16 +9 - +09 1945 S 12 +7:30 - +0730 1982 +8 - +08 +Z Asia/Colombo 5:19:24 - LMT 1880 +5:19:32 - MMT 1906 +5:30 - +0530 1942 Ja 5 +5:30 0:30 +06 1942 S +5:30 1 +0630 1945 O 16 2 +5:30 - +0530 1996 May 25 +6:30 - +0630 1996 O 26 0:30 +6 - +06 2006 Ap 15 0:30 +5:30 - +0530 +R k 1920 1923 - Ap Sun>=15 2 1 S +R k 1920 1923 - O Sun>=1 2 0 - +R k 1962 o - Ap 29 2 1 S +R k 1962 o - O 1 2 0 - +R k 1963 1965 - May 1 2 1 S +R k 1963 o - S 30 2 0 - +R k 1964 o - O 1 2 0 - +R k 1965 o - S 30 2 0 - +R k 1966 o - Ap 24 2 1 S +R k 1966 1976 - O 1 2 0 - +R k 1967 1978 - May 1 2 1 S +R k 1977 1978 - S 1 2 0 - +R k 1983 1984 - Ap 9 2 1 S +R k 1983 1984 - O 1 2 0 - +R k 1986 o - F 16 2 1 S +R k 1986 o - O 9 2 0 - +R k 1987 o - Mar 1 2 1 S +R k 1987 1988 - O 31 2 0 - +R k 1988 o - Mar 15 2 1 S +R k 1989 o - Mar 31 2 1 S +R k 1989 o - O 1 2 0 - +R k 1990 o - Ap 1 2 1 S +R k 1990 o - S 30 2 0 - +R k 1991 o - Ap 1 0 1 S +R k 1991 1992 - O 1 0 0 - +R k 1992 o - Ap 8 0 1 S +R k 1993 o - Mar 26 0 1 S +R k 1993 o - S 25 0 0 - +R k 1994 1996 - Ap 1 0 1 S +R k 1994 
2005 - O 1 0 0 - +R k 1997 1998 - Mar lastM 0 1 S +R k 1999 2006 - Ap 1 0 1 S +R k 2006 o - S 22 0 0 - +R k 2007 o - Mar lastF 0 1 S +R k 2007 o - N F>=1 0 0 - +R k 2008 o - Ap F>=1 0 1 S +R k 2008 o - N 1 0 0 - +R k 2009 o - Mar lastF 0 1 S +R k 2010 2011 - Ap F>=1 0 1 S +R k 2012 ma - Mar lastF 0 1 S +R k 2009 ma - O lastF 0 0 - +Z Asia/Damascus 2:25:12 - LMT 1920 +2 k EE%sT +Z Asia/Dushanbe 4:35:12 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 M +06/+07 1991 Mar 31 2s +5 1 +05/+06 1991 S 9 2s +5 - +05 +Z Asia/Bangkok 6:42:4 - LMT 1880 +6:42:4 - BMT 1920 Ap +7 - +07 +Li Asia/Bangkok Asia/Phnom_Penh +Li Asia/Bangkok Asia/Vientiane +Z Asia/Ashgabat 3:53:32 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 M +05/+06 1991 Mar 31 2 +4 M +04/+05 1992 Ja 19 2 +5 - +05 +Z Asia/Dubai 3:41:12 - LMT 1920 +4 - +04 +Li Asia/Dubai Asia/Muscat +Z Asia/Samarkand 4:27:53 - LMT 1924 May 2 +4 - +04 1930 Jun 21 +5 - +05 1981 Ap +5 1 +06 1981 O +6 - +06 1982 Ap +5 M +05/+06 1992 +5 - +05 +Z Asia/Tashkent 4:37:11 - LMT 1924 May 2 +5 - +05 1930 Jun 21 +6 M +06/+07 1991 Mar 31 2 +5 M +05/+06 1992 +5 - +05 +Z Asia/Ho_Chi_Minh 7:6:40 - LMT 1906 Jul +7:6:30 - PLMT 1911 May +7 - +07 1942 D 31 23 +8 - +08 1945 Mar 14 23 +9 - +09 1945 S 2 +7 - +07 1947 Ap +8 - +08 1955 Jul +7 - +07 1959 D 31 23 +8 - +08 1975 Jun 13 +7 - +07 +R l 1917 o - Ja 1 0:1 1 D +R l 1917 o - Mar 25 2 0 S +R l 1942 o - Ja 1 2 1 D +R l 1942 o - Mar 29 2 0 S +R l 1942 o - S 27 2 1 D +R l 1943 1944 - Mar lastSun 2 0 S +R l 1943 o - O 3 2 1 D +Z Australia/Darwin 8:43:20 - LMT 1895 F +9 - ACST 1899 May +9:30 l AC%sT +R m 1974 o - O lastSun 2s 1 D +R m 1975 o - Mar Sun>=1 2s 0 S +R m 1983 o - O lastSun 2s 1 D +R m 1984 o - Mar Sun>=1 2s 0 S +R m 1991 o - N 17 2s 1 D +R m 1992 o - Mar Sun>=1 2s 0 S +R m 2006 o - D 3 2s 1 D +R m 2007 2009 - Mar lastSun 2s 0 S +R m 2007 2008 - O lastSun 2s 1 D +Z Australia/Perth 7:43:24 - LMT 1895 D +8 l AW%sT 1943 Jul +8 m AW%sT +Z Australia/Eucla 8:35:28 - LMT 1895 D +8:45 l +0845/+0945 1943 Jul +8:45 m +0845/+0945 +R n 1971 o - O lastSun 2s 1 D +R n 1972 o - F lastSun 2s 0 S +R n 1989 1991 - O lastSun 2s 1 D +R n 1990 1992 - Mar Sun>=1 2s 0 S +R o 1992 1993 - O lastSun 2s 1 D +R o 1993 1994 - Mar Sun>=1 2s 0 S +Z Australia/Brisbane 10:12:8 - LMT 1895 +10 l AE%sT 1971 +10 n AE%sT +Z Australia/Lindeman 9:55:56 - LMT 1895 +10 l AE%sT 1971 +10 n AE%sT 1992 Jul +10 o AE%sT +R p 1971 1985 - O lastSun 2s 1 D +R p 1986 o - O 19 2s 1 D +R p 1987 2007 - O lastSun 2s 1 D +R p 1972 o - F 27 2s 0 S +R p 1973 1985 - Mar Sun>=1 2s 0 S +R p 1986 1990 - Mar Sun>=15 2s 0 S +R p 1991 o - Mar 3 2s 0 S +R p 1992 o - Mar 22 2s 0 S +R p 1993 o - Mar 7 2s 0 S +R p 1994 o - Mar 20 2s 0 S +R p 1995 2005 - Mar lastSun 2s 0 S +R p 2006 o - Ap 2 2s 0 S +R p 2007 o - Mar lastSun 2s 0 S +R p 2008 ma - Ap Sun>=1 2s 0 S +R p 2008 ma - O Sun>=1 2s 1 D +Z Australia/Adelaide 9:14:20 - LMT 1895 F +9 - ACST 1899 May +9:30 l AC%sT 1971 +9:30 p AC%sT +R q 1967 o - O Sun>=1 2s 1 D +R q 1968 o - Mar lastSun 2s 0 S +R q 1968 1985 - O lastSun 2s 1 D +R q 1969 1971 - Mar Sun>=8 2s 0 S +R q 1972 o - F lastSun 2s 0 S +R q 1973 1981 - Mar Sun>=1 2s 0 S +R q 1982 1983 - Mar lastSun 2s 0 S +R q 1984 1986 - Mar Sun>=1 2s 0 S +R q 1986 o - O Sun>=15 2s 1 D +R q 1987 1990 - Mar Sun>=15 2s 0 S +R q 1987 o - O Sun>=22 2s 1 D +R q 1988 1990 - O lastSun 2s 1 D +R q 1991 1999 - O Sun>=1 2s 1 D +R q 1991 2005 - Mar lastSun 2s 0 S +R q 2000 o - Au lastSun 2s 1 D +R q 2001 ma - O Sun>=1 2s 1 D +R q 2006 o - Ap Sun>=1 2s 0 S +R q 2007 o - Mar lastSun 2s 0 S +R q 2008 ma - Ap Sun>=1 2s 0 S +Z 
Australia/Hobart 9:49:16 - LMT 1895 S +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 l AE%sT 1967 +10 q AE%sT +Z Australia/Currie 9:35:28 - LMT 1895 S +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 l AE%sT 1971 Jul +10 q AE%sT +R r 1971 1985 - O lastSun 2s 1 D +R r 1972 o - F lastSun 2s 0 S +R r 1973 1985 - Mar Sun>=1 2s 0 S +R r 1986 1990 - Mar Sun>=15 2s 0 S +R r 1986 1987 - O Sun>=15 2s 1 D +R r 1988 1999 - O lastSun 2s 1 D +R r 1991 1994 - Mar Sun>=1 2s 0 S +R r 1995 2005 - Mar lastSun 2s 0 S +R r 2000 o - Au lastSun 2s 1 D +R r 2001 2007 - O lastSun 2s 1 D +R r 2006 o - Ap Sun>=1 2s 0 S +R r 2007 o - Mar lastSun 2s 0 S +R r 2008 ma - Ap Sun>=1 2s 0 S +R r 2008 ma - O Sun>=1 2s 1 D +Z Australia/Melbourne 9:39:52 - LMT 1895 F +10 l AE%sT 1971 +10 r AE%sT +R s 1971 1985 - O lastSun 2s 1 D +R s 1972 o - F 27 2s 0 S +R s 1973 1981 - Mar Sun>=1 2s 0 S +R s 1982 o - Ap Sun>=1 2s 0 S +R s 1983 1985 - Mar Sun>=1 2s 0 S +R s 1986 1989 - Mar Sun>=15 2s 0 S +R s 1986 o - O 19 2s 1 D +R s 1987 1999 - O lastSun 2s 1 D +R s 1990 1995 - Mar Sun>=1 2s 0 S +R s 1996 2005 - Mar lastSun 2s 0 S +R s 2000 o - Au lastSun 2s 1 D +R s 2001 2007 - O lastSun 2s 1 D +R s 2006 o - Ap Sun>=1 2s 0 S +R s 2007 o - Mar lastSun 2s 0 S +R s 2008 ma - Ap Sun>=1 2s 0 S +R s 2008 ma - O Sun>=1 2s 1 D +Z Australia/Sydney 10:4:52 - LMT 1895 F +10 l AE%sT 1971 +10 s AE%sT +Z Australia/Broken_Hill 9:25:48 - LMT 1895 F +10 - AEST 1896 Au 23 +9 - ACST 1899 May +9:30 l AC%sT 1971 +9:30 s AC%sT 2000 +9:30 p AC%sT +R t 1981 1984 - O lastSun 2 1 D +R t 1982 1985 - Mar Sun>=1 2 0 S +R t 1985 o - O lastSun 2 0:30 D +R t 1986 1989 - Mar Sun>=15 2 0 S +R t 1986 o - O 19 2 0:30 D +R t 1987 1999 - O lastSun 2 0:30 D +R t 1990 1995 - Mar Sun>=1 2 0 S +R t 1996 2005 - Mar lastSun 2 0 S +R t 2000 o - Au lastSun 2 0:30 D +R t 2001 2007 - O lastSun 2 0:30 D +R t 2006 o - Ap Sun>=1 2 0 S +R t 2007 o - Mar lastSun 2 0 S +R t 2008 ma - Ap Sun>=1 2 0 S +R t 2008 ma - O Sun>=1 2 0:30 D +Z Australia/Lord_Howe 10:36:20 - LMT 1895 F +10 - AEST 1981 Mar +10:30 t +1030/+1130 1985 Jul +10:30 t +1030/+11 +Z Antarctica/Macquarie 0 - -00 1899 N +10 - AEST 1916 O 1 2 +10 1 AEDT 1917 F +10 l AE%sT 1919 Ap 1 0s +0 - -00 1948 Mar 25 +10 l AE%sT 1967 +10 q AE%sT 2010 Ap 4 3 +11 - +11 +Z Indian/Christmas 7:2:52 - LMT 1895 F +7 - +07 +Z Indian/Cocos 6:27:40 - LMT 1900 +6:30 - +0630 +R u 1998 1999 - N Sun>=1 2 1 S +R u 1999 2000 - F lastSun 3 0 - +R u 2009 o - N 29 2 1 S +R u 2010 o - Mar lastSun 3 0 - +R u 2010 2013 - O Sun>=21 2 1 S +R u 2011 o - Mar Sun>=1 3 0 - +R u 2012 2013 - Ja Sun>=18 3 0 - +R u 2014 o - Ja Sun>=18 2 0 - +R u 2014 ma - N Sun>=1 2 1 S +R u 2015 ma - Ja Sun>=14 3 0 - +Z Pacific/Fiji 11:55:44 - LMT 1915 O 26 +12 u +12/+13 +Z Pacific/Gambier -8:59:48 - LMT 1912 O +-9 - -09 +Z Pacific/Marquesas -9:18 - LMT 1912 O +-9:30 - -0930 +Z Pacific/Tahiti -9:58:16 - LMT 1912 O +-10 - -10 +Z Pacific/Guam -14:21 - LMT 1844 D 31 +9:39 - LMT 1901 +10 - GST 2000 D 23 +10 - ChST +Li Pacific/Guam Pacific/Saipan +Z Pacific/Tarawa 11:32:4 - LMT 1901 +12 - +12 +Z Pacific/Enderbury -11:24:20 - LMT 1901 +-12 - -12 1979 O +-11 - -11 1995 +13 - +13 +Z Pacific/Kiritimati -10:29:20 - LMT 1901 +-10:40 - -1040 1979 O +-10 - -10 1995 +14 - +14 +Z Pacific/Majuro 11:24:48 - LMT 1901 +11 - +11 1969 O +12 - +12 +Z Pacific/Kwajalein 11:9:20 - LMT 1901 +11 - +11 1969 O +-12 - -12 1993 Au 20 +12 - +12 +Z Pacific/Chuuk 10:7:8 - LMT 1901 +10 - +10 +Z Pacific/Pohnpei 10:32:52 - LMT 1901 +11 - +11 +Z Pacific/Kosrae 10:51:56 - LMT 1901 +11 - +11 1969 O +12 - +12 1999 +11 - +11 +Z 
Pacific/Nauru 11:7:40 - LMT 1921 Ja 15 +11:30 - +1130 1942 Mar 15 +9 - +09 1944 Au 15 +11:30 - +1130 1979 May +12 - +12 +R v 1977 1978 - D Sun>=1 0 1 S +R v 1978 1979 - F 27 0 0 - +R v 1996 o - D 1 2s 1 S +R v 1997 o - Mar 2 2s 0 - +Z Pacific/Noumea 11:5:48 - LMT 1912 Ja 13 +11 v +11/+12 +R w 1927 o - N 6 2 1 S +R w 1928 o - Mar 4 2 0 M +R w 1928 1933 - O Sun>=8 2 0:30 S +R w 1929 1933 - Mar Sun>=15 2 0 M +R w 1934 1940 - Ap lastSun 2 0 M +R w 1934 1940 - S lastSun 2 0:30 S +R w 1946 o - Ja 1 0 0 S +R w 1974 o - N Sun>=1 2s 1 D +R x 1974 o - N Sun>=1 2:45s 1 D +R w 1975 o - F lastSun 2s 0 S +R x 1975 o - F lastSun 2:45s 0 S +R w 1975 1988 - O lastSun 2s 1 D +R x 1975 1988 - O lastSun 2:45s 1 D +R w 1976 1989 - Mar Sun>=1 2s 0 S +R x 1976 1989 - Mar Sun>=1 2:45s 0 S +R w 1989 o - O Sun>=8 2s 1 D +R x 1989 o - O Sun>=8 2:45s 1 D +R w 1990 2006 - O Sun>=1 2s 1 D +R x 1990 2006 - O Sun>=1 2:45s 1 D +R w 1990 2007 - Mar Sun>=15 2s 0 S +R x 1990 2007 - Mar Sun>=15 2:45s 0 S +R w 2007 ma - S lastSun 2s 1 D +R x 2007 ma - S lastSun 2:45s 1 D +R w 2008 ma - Ap Sun>=1 2s 0 S +R x 2008 ma - Ap Sun>=1 2:45s 0 S +Z Pacific/Auckland 11:39:4 - LMT 1868 N 2 +11:30 w NZ%sT 1946 +12 w NZ%sT +Z Pacific/Chatham 12:13:48 - LMT 1868 N 2 +12:15 - +1215 1946 +12:45 x +1245/+1345 +Li Pacific/Auckland Antarctica/McMurdo +R y 1978 o - N 12 0 0:30 HS +R y 1979 1991 - Mar Sun>=1 0 0 - +R y 1979 1990 - O lastSun 0 0:30 HS +Z Pacific/Rarotonga -10:39:4 - LMT 1901 +-10:30 - -1030 1978 N 12 +-10 y -10/-0930 +Z Pacific/Niue -11:19:40 - LMT 1901 +-11:20 - -1120 1951 +-11:30 - -1130 1978 O +-11 - -11 +Z Pacific/Norfolk 11:11:52 - LMT 1901 +11:12 - +1112 1951 +11:30 - +1130 1974 O 27 2 +11:30 1 +1230 1975 Mar 2 2 +11:30 - +1130 2015 O 4 2 +11 - +11 +Z Pacific/Palau 8:57:56 - LMT 1901 +9 - +09 +Z Pacific/Port_Moresby 9:48:40 - LMT 1880 +9:48:32 - PMMT 1895 +10 - +10 +Z Pacific/Bougainville 10:22:16 - LMT 1880 +9:48:32 - PMMT 1895 +10 - +10 1942 Jul +9 - +09 1945 Au 21 +10 - +10 2014 D 28 2 +11 - +11 +Z Pacific/Pitcairn -8:40:20 - LMT 1901 +-8:30 - -0830 1998 Ap 27 +-8 - -08 +Z Pacific/Pago_Pago 12:37:12 - LMT 1892 Jul 5 +-11:22:48 - LMT 1911 +-11 - SST +Li Pacific/Pago_Pago Pacific/Midway +R z 2010 o - S lastSun 0 1 D +R z 2011 o - Ap Sat>=1 4 0 S +R z 2011 o - S lastSat 3 1 D +R z 2012 ma - Ap Sun>=1 4 0 S +R z 2012 ma - S lastSun 3 1 D +Z Pacific/Apia 12:33:4 - LMT 1892 Jul 5 +-11:26:56 - LMT 1911 +-11:30 - -1130 1950 +-11 z -11/-10 2011 D 29 24 +13 z +13/+14 +Z Pacific/Guadalcanal 10:39:48 - LMT 1912 O +11 - +11 +Z Pacific/Fakaofo -11:24:56 - LMT 1901 +-11 - -11 2011 D 30 +13 - +13 +R ! 1999 o - O 7 2s 1 S +R ! 2000 o - Mar 19 2s 0 - +R ! 2000 2001 - N Sun>=1 2 1 S +R ! 2001 2002 - Ja lastSun 2 0 - +R ! 2016 o - N Sun>=1 2 1 S +R ! 2017 o - Ja Sun>=15 3 0 - +Z Pacific/Tongatapu 12:19:20 - LMT 1901 +12:20 - +1220 1941 +13 - +13 1999 +13 ! 
+13/+14 +Z Pacific/Funafuti 11:56:52 - LMT 1901 +12 - +12 +Z Pacific/Wake 11:6:28 - LMT 1901 +12 - +12 +R $ 1983 o - S 25 0 1 S +R $ 1984 1991 - Mar Sun>=23 0 0 - +R $ 1984 o - O 23 0 1 S +R $ 1985 1991 - S Sun>=23 0 1 S +R $ 1992 1993 - Ja Sun>=23 0 0 - +R $ 1992 o - O Sun>=23 0 1 S +Z Pacific/Efate 11:13:16 - LMT 1912 Ja 13 +11 $ +11/+12 +Z Pacific/Wallis 12:15:20 - LMT 1901 +12 - +12 +R % 1916 o - May 21 2s 1 BST +R % 1916 o - O 1 2s 0 GMT +R % 1917 o - Ap 8 2s 1 BST +R % 1917 o - S 17 2s 0 GMT +R % 1918 o - Mar 24 2s 1 BST +R % 1918 o - S 30 2s 0 GMT +R % 1919 o - Mar 30 2s 1 BST +R % 1919 o - S 29 2s 0 GMT +R % 1920 o - Mar 28 2s 1 BST +R % 1920 o - O 25 2s 0 GMT +R % 1921 o - Ap 3 2s 1 BST +R % 1921 o - O 3 2s 0 GMT +R % 1922 o - Mar 26 2s 1 BST +R % 1922 o - O 8 2s 0 GMT +R % 1923 o - Ap Sun>=16 2s 1 BST +R % 1923 1924 - S Sun>=16 2s 0 GMT +R % 1924 o - Ap Sun>=9 2s 1 BST +R % 1925 1926 - Ap Sun>=16 2s 1 BST +R % 1925 1938 - O Sun>=2 2s 0 GMT +R % 1927 o - Ap Sun>=9 2s 1 BST +R % 1928 1929 - Ap Sun>=16 2s 1 BST +R % 1930 o - Ap Sun>=9 2s 1 BST +R % 1931 1932 - Ap Sun>=16 2s 1 BST +R % 1933 o - Ap Sun>=9 2s 1 BST +R % 1934 o - Ap Sun>=16 2s 1 BST +R % 1935 o - Ap Sun>=9 2s 1 BST +R % 1936 1937 - Ap Sun>=16 2s 1 BST +R % 1938 o - Ap Sun>=9 2s 1 BST +R % 1939 o - Ap Sun>=16 2s 1 BST +R % 1939 o - N Sun>=16 2s 0 GMT +R % 1940 o - F Sun>=23 2s 1 BST +R % 1941 o - May Sun>=2 1s 2 BDST +R % 1941 1943 - Au Sun>=9 1s 1 BST +R % 1942 1944 - Ap Sun>=2 1s 2 BDST +R % 1944 o - S Sun>=16 1s 1 BST +R % 1945 o - Ap M>=2 1s 2 BDST +R % 1945 o - Jul Sun>=9 1s 1 BST +R % 1945 1946 - O Sun>=2 2s 0 GMT +R % 1946 o - Ap Sun>=9 2s 1 BST +R % 1947 o - Mar 16 2s 1 BST +R % 1947 o - Ap 13 1s 2 BDST +R % 1947 o - Au 10 1s 1 BST +R % 1947 o - N 2 2s 0 GMT +R % 1948 o - Mar 14 2s 1 BST +R % 1948 o - O 31 2s 0 GMT +R % 1949 o - Ap 3 2s 1 BST +R % 1949 o - O 30 2s 0 GMT +R % 1950 1952 - Ap Sun>=14 2s 1 BST +R % 1950 1952 - O Sun>=21 2s 0 GMT +R % 1953 o - Ap Sun>=16 2s 1 BST +R % 1953 1960 - O Sun>=2 2s 0 GMT +R % 1954 o - Ap Sun>=9 2s 1 BST +R % 1955 1956 - Ap Sun>=16 2s 1 BST +R % 1957 o - Ap Sun>=9 2s 1 BST +R % 1958 1959 - Ap Sun>=16 2s 1 BST +R % 1960 o - Ap Sun>=9 2s 1 BST +R % 1961 1963 - Mar lastSun 2s 1 BST +R % 1961 1968 - O Sun>=23 2s 0 GMT +R % 1964 1967 - Mar Sun>=19 2s 1 BST +R % 1968 o - F 18 2s 1 BST +R % 1972 1980 - Mar Sun>=16 2s 1 BST +R % 1972 1980 - O Sun>=23 2s 0 GMT +R % 1981 1995 - Mar lastSun 1u 1 BST +R % 1981 1989 - O Sun>=23 1u 0 GMT +R % 1990 1995 - O Sun>=22 1u 0 GMT +Z Europe/London -0:1:15 - LMT 1847 D 1 0s +0 % %s 1968 O 27 +1 - BST 1971 O 31 2u +0 % %s 1996 +0 O GMT/BST +Li Europe/London Europe/Jersey +Li Europe/London Europe/Guernsey +Li Europe/London Europe/Isle_of_Man +Z Europe/Dublin -0:25 - LMT 1880 Au 2 +-0:25:21 - DMT 1916 May 21 2s +-0:25:21 1 IST 1916 O 1 2s +0 % %s 1921 D 6 +0 % GMT/IST 1940 F 25 2s +0 1 IST 1946 O 6 2s +0 - GMT 1947 Mar 16 2s +0 1 IST 1947 N 2 2s +0 - GMT 1948 Ap 18 2s +0 % GMT/IST 1968 O 27 +1 - IST 1971 O 31 2u +0 % GMT/IST 1996 +0 O GMT/IST +R O 1977 1980 - Ap Sun>=1 1u 1 S +R O 1977 o - S lastSun 1u 0 - +R O 1978 o - O 1 1u 0 - +R O 1979 1995 - S lastSun 1u 0 - +R O 1981 ma - Mar lastSun 1u 1 S +R O 1996 ma - O lastSun 1u 0 - +R & 1977 1980 - Ap Sun>=1 1s 1 S +R & 1977 o - S lastSun 1s 0 - +R & 1978 o - O 1 1s 0 - +R & 1979 1995 - S lastSun 1s 0 - +R & 1981 ma - Mar lastSun 1s 1 S +R & 1996 ma - O lastSun 1s 0 - +R ' 1916 o - Ap 30 23 1 S +R ' 1916 o - O 1 1 0 - +R ' 1917 1918 - Ap M>=15 2s 1 S +R ' 1917 1918 - S M>=15 2s 0 - +R ' 1940 o - Ap 1 2s 
[Compact zic-format timezone source (tzdata.zi style), continued: in the original diff each added line is one Rule (R), Zone (Z), or Link (Li) record; the line breaks were lost in extraction. This portion covers the European zones (Europe/Tirane through Europe/Kiev, Europe/Uzhgorod, and Europe/Zaporozhye, including the Russian zones Europe/Kaliningrad through Asia/Anadyr), the North American zones (US, Canadian, and Mexican zones plus the Caribbean), the Central and South American zones (Argentina, Brazil, Chile, and neighbours, plus Antarctica/Palmer and the Pacific islands tied to them), the fixed-offset Etc/GMT zones, and the start of the backward-compatibility links (Africa/Asmera through Asia/Thimphu).]
Asia/Thimbu +Li Asia/Makassar Asia/Ujung_Pandang +Li Asia/Ulaanbaatar Asia/Ulan_Bator +Li Atlantic/Faroe Atlantic/Faeroe +Li Europe/Oslo Atlantic/Jan_Mayen +Li Australia/Sydney Australia/ACT +Li Australia/Sydney Australia/Canberra +Li Australia/Lord_Howe Australia/LHI +Li Australia/Sydney Australia/NSW +Li Australia/Darwin Australia/North +Li Australia/Brisbane Australia/Queensland +Li Australia/Adelaide Australia/South +Li Australia/Hobart Australia/Tasmania +Li Australia/Melbourne Australia/Victoria +Li Australia/Perth Australia/West +Li Australia/Broken_Hill Australia/Yancowinna +Li America/Rio_Branco Brazil/Acre +Li America/Noronha Brazil/DeNoronha +Li America/Sao_Paulo Brazil/East +Li America/Manaus Brazil/West +Li America/Halifax Canada/Atlantic +Li America/Winnipeg Canada/Central +Li America/Toronto Canada/Eastern +Li America/Edmonton Canada/Mountain +Li America/St_Johns Canada/Newfoundland +Li America/Vancouver Canada/Pacific +Li America/Regina Canada/Saskatchewan +Li America/Whitehorse Canada/Yukon +Li America/Santiago Chile/Continental +Li Pacific/Easter Chile/EasterIsland +Li America/Havana Cuba +Li Africa/Cairo Egypt +Li Europe/Dublin Eire +Li Europe/London Europe/Belfast +Li Europe/Chisinau Europe/Tiraspol +Li Europe/London GB +Li Europe/London GB-Eire +Li Etc/GMT GMT+0 +Li Etc/GMT GMT-0 +Li Etc/GMT GMT0 +Li Etc/GMT Greenwich +Li Asia/Hong_Kong Hongkong +Li Atlantic/Reykjavik Iceland +Li Asia/Tehran Iran +Li Asia/Jerusalem Israel +Li America/Jamaica Jamaica +Li Asia/Tokyo Japan +Li Pacific/Kwajalein Kwajalein +Li Africa/Tripoli Libya +Li America/Tijuana Mexico/BajaNorte +Li America/Mazatlan Mexico/BajaSur +Li America/Mexico_City Mexico/General +Li Pacific/Auckland NZ +Li Pacific/Chatham NZ-CHAT +Li America/Denver Navajo +Li Asia/Shanghai PRC +Li Pacific/Honolulu Pacific/Johnston +Li Pacific/Pohnpei Pacific/Ponape +Li Pacific/Pago_Pago Pacific/Samoa +Li Pacific/Chuuk Pacific/Truk +Li Pacific/Chuuk Pacific/Yap +Li Europe/Warsaw Poland +Li Europe/Lisbon Portugal +Li Asia/Taipei ROC +Li Asia/Seoul ROK +Li Asia/Singapore Singapore +Li Europe/Istanbul Turkey +Li Etc/UCT UCT +Li America/Anchorage US/Alaska +Li America/Adak US/Aleutian +Li America/Phoenix US/Arizona +Li America/Chicago US/Central +Li America/Indiana/Indianapolis US/East-Indiana +Li America/New_York US/Eastern +Li Pacific/Honolulu US/Hawaii +Li America/Indiana/Knox US/Indiana-Starke +Li America/Detroit US/Michigan +Li America/Denver US/Mountain +Li America/Los_Angeles US/Pacific +Li Pacific/Pago_Pago US/Samoa +Li Etc/UTC UTC +Li Etc/UTC Universal +Li Europe/Moscow W-SU +Li Etc/UTC Zulu +Li America/Los_Angeles US/Pacific-New +Z Factory 0 - -00 diff --git a/ext/pytz/zoneinfo/zone.tab b/ext/pytz/zoneinfo/zone.tab index 204048cc5a..2d0b26b7d6 100644 --- a/ext/pytz/zoneinfo/zone.tab +++ b/ext/pytz/zoneinfo/zone.tab @@ -186,7 +186,7 @@ GB +513030-0000731 Europe/London GD +1203-06145 America/Grenada GE +4143+04449 Asia/Tbilisi GF +0456-05220 America/Cayenne -GG +4927-00232 Europe/Guernsey +GG +492717-0023210 Europe/Guernsey GH +0533-00013 Africa/Accra GI +3608-00521 Europe/Gibraltar GL +6411-05144 America/Godthab Greenland (most areas) @@ -221,7 +221,7 @@ IQ +3321+04425 Asia/Baghdad IR +3540+05126 Asia/Tehran IS +6409-02151 Atlantic/Reykjavik IT +4154+01229 Europe/Rome -JE +4912-00207 Europe/Jersey +JE +491101-0020624 Europe/Jersey JM +175805-0764736 America/Jamaica JO +3157+03556 Asia/Amman JP +353916+1394441 Asia/Tokyo diff --git a/ext/pytz/zoneinfo/zone1970.tab b/ext/pytz/zoneinfo/zone1970.tab index 2bcdc64b88..455e7976d2 
100644 --- a/ext/pytz/zoneinfo/zone1970.tab +++ b/ext/pytz/zoneinfo/zone1970.tab @@ -2,7 +2,7 @@ # # This file is in the public domain. # -# From Paul Eggert (2014-07-31): +# From Paul Eggert (2017-10-01): # This file contains a table where each row stands for a zone where # civil time stamps have agreed since 1970. Columns are separated by # a single tab. Lines beginning with '#' are comments. All text uses @@ -15,7 +15,7 @@ # either +-DDMM+-DDDMM or +-DDMMSS+-DDDMMSS, # first latitude (+ is north), then longitude (+ is east). # 3. Zone name used in value of TZ environment variable. -# Please see the 'Theory' file for how zone names are chosen. +# Please see the theory.html file for how zone names are chosen. # If multiple zones overlap a country, each has a row in the # table, with each column 1 containing the country code. # 4. Comments; present if and only if a country has multiple zones. @@ -316,10 +316,11 @@ RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea SA,KW,YE +2438+04643 Asia/Riyadh SB -0932+16012 Pacific/Guadalcanal SC -0440+05528 Indian/Mahe -SD,SS +1536+03232 Africa/Khartoum +SD +1536+03232 Africa/Khartoum SE +5920+01803 Europe/Stockholm SG +0117+10351 Asia/Singapore SR +0550-05510 America/Paramaribo +SS +0451+03137 Africa/Juba SV +1342-08912 America/El_Salvador SY +3330+03618 Asia/Damascus TC +2128-07108 America/Grand_Turk diff --git a/ext/setuptools/__init__.py b/ext/setuptools/__init__.py index 04f7674082..7da47fbed5 100644 --- a/ext/setuptools/__init__.py +++ b/ext/setuptools/__init__.py @@ -109,7 +109,27 @@ def _looks_like_package(path): find_packages = PackageFinder.find -setup = distutils.core.setup + +def _install_setup_requires(attrs): + # Note: do not use `setuptools.Distribution` directly, as + # our PEP 517 backend patch `distutils.core.Distribution`. + dist = distutils.core.Distribution(dict( + (k, v) for k, v in attrs.items() + if k in ('dependency_links', 'setup_requires') + )) + # Honor setup.cfg's options. + dist.parse_config_files(ignore_option_errors=True) + if dist.setup_requires: + dist.fetch_build_eggs(dist.setup_requires) + + +def setup(**attrs): + # Make sure we have any requirements needed to interpret 'attrs'. 
+ _install_setup_requires(attrs) + return distutils.core.setup(**attrs) + +setup.__doc__ = distutils.core.setup.__doc__ + _Command = monkey.get_unpatched(distutils.core.Command) diff --git a/ext/setuptools/archive_util.py b/ext/setuptools/archive_util.py index cc82b3da36..81436044d9 100644 --- a/ext/setuptools/archive_util.py +++ b/ext/setuptools/archive_util.py @@ -8,7 +8,7 @@ import contextlib from distutils.errors import DistutilsError -from pkg_resources import ensure_directory, ContextualZipFile +from pkg_resources import ensure_directory __all__ = [ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", @@ -98,7 +98,7 @@ def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): if not zipfile.is_zipfile(filename): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) - with ContextualZipFile(filename) as z: + with zipfile.ZipFile(filename) as z: for info in z.infolist(): name = info.filename diff --git a/ext/setuptools/build_meta.py b/ext/setuptools/build_meta.py new file mode 100644 index 0000000000..609ea1e510 --- /dev/null +++ b/ext/setuptools/build_meta.py @@ -0,0 +1,172 @@ +"""A PEP 517 interface to setuptools + +Previously, when a user or a command line tool (let's call it a "frontend") +needed to make a request of setuptools to take a certain action, for +example, generating a list of installation requirements, the frontend would +call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. + +PEP 517 defines a different method of interfacing with setuptools. Rather +than calling "setup.py" directly, the frontend should: + + 1. Set the current directory to the directory with a setup.py file + 2. Import this module into a safe python interpreter (one in which + setuptools can potentially set global variables or crash hard). + 3. Call one of the functions defined in PEP 517. + +What each function does is defined in PEP 517. However, here is a "casual" +definition of the functions (this definition should not be relied on for +bug reports or API stability): + + - `build_wheel`: build a wheel in the folder and return the basename + - `get_requires_for_build_wheel`: get the `setup_requires` to build + - `prepare_metadata_for_build_wheel`: get the `install_requires` + - `build_sdist`: build an sdist in the folder and return the basename + - `get_requires_for_build_sdist`: get the `setup_requires` to build + +Again, this is not a formal definition! Just a "taste" of the module. +""" + +import os +import sys +import tokenize +import shutil +import contextlib + +import setuptools +import distutils + + +class SetupRequirementsError(BaseException): + def __init__(self, specifiers): + self.specifiers = specifiers + + +class Distribution(setuptools.dist.Distribution): + def fetch_build_eggs(self, specifiers): + raise SetupRequirementsError(specifiers) + + @classmethod + @contextlib.contextmanager + def patch(cls): + """ + Replace + distutils.dist.Distribution with this class + for the duration of this context.
+ """ + orig = distutils.core.Distribution + distutils.core.Distribution = cls + try: + yield + finally: + distutils.core.Distribution = orig + + +def _run_setup(setup_script='setup.py'): + # Note that we can reuse our build directory between calls + # Correctness comes first, then optimization later + __file__ = setup_script + __name__ = '__main__' + f = getattr(tokenize, 'open', open)(__file__) + code = f.read().replace('\\r\\n', '\\n') + f.close() + exec(compile(code, __file__, 'exec'), locals()) + + +def _fix_config(config_settings): + config_settings = config_settings or {} + config_settings.setdefault('--global-option', []) + return config_settings + + +def _get_build_requires(config_settings): + config_settings = _fix_config(config_settings) + requirements = ['setuptools', 'wheel'] + + sys.argv = sys.argv[:1] + ['egg_info'] + \ + config_settings["--global-option"] + try: + with Distribution.patch(): + _run_setup() + except SetupRequirementsError as e: + requirements += e.specifiers + + return requirements + + +def _get_immediate_subdirectories(a_dir): + return [name for name in os.listdir(a_dir) + if os.path.isdir(os.path.join(a_dir, name))] + + +def get_requires_for_build_wheel(config_settings=None): + config_settings = _fix_config(config_settings) + return _get_build_requires(config_settings) + + +def get_requires_for_build_sdist(config_settings=None): + config_settings = _fix_config(config_settings) + return _get_build_requires(config_settings) + + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): + sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', metadata_directory] + _run_setup() + + dist_info_directory = metadata_directory + while True: + dist_infos = [f for f in os.listdir(dist_info_directory) + if f.endswith('.dist-info')] + + if len(dist_infos) == 0 and \ + len(_get_immediate_subdirectories(dist_info_directory)) == 1: + dist_info_directory = os.path.join( + dist_info_directory, os.listdir(dist_info_directory)[0]) + continue + + assert len(dist_infos) == 1 + break + + # PEP 517 requires that the .dist-info directory be placed in the + # metadata_directory. 
To comply, we MUST copy the directory to the root + if dist_info_directory != metadata_directory: + shutil.move( + os.path.join(dist_info_directory, dist_infos[0]), + metadata_directory) + shutil.rmtree(dist_info_directory, ignore_errors=True) + + return dist_infos[0] + + +def build_wheel(wheel_directory, config_settings=None, + metadata_directory=None): + config_settings = _fix_config(config_settings) + wheel_directory = os.path.abspath(wheel_directory) + sys.argv = sys.argv[:1] + ['bdist_wheel'] + \ + config_settings["--global-option"] + _run_setup() + if wheel_directory != 'dist': + shutil.rmtree(wheel_directory) + shutil.copytree('dist', wheel_directory) + + wheels = [f for f in os.listdir(wheel_directory) + if f.endswith('.whl')] + + assert len(wheels) == 1 + return wheels[0] + + +def build_sdist(sdist_directory, config_settings=None): + config_settings = _fix_config(config_settings) + sdist_directory = os.path.abspath(sdist_directory) + sys.argv = sys.argv[:1] + ['sdist'] + \ + config_settings["--global-option"] + _run_setup() + if sdist_directory != 'dist': + shutil.rmtree(sdist_directory) + shutil.copytree('dist', sdist_directory) + + sdists = [f for f in os.listdir(sdist_directory) + if f.endswith('.tar.gz')] + + assert len(sdists) == 1 + return sdists[0] diff --git a/ext/setuptools/command/__init__.py b/ext/setuptools/command/__init__.py index c96d33c23d..fe619e2e67 100644 --- a/ext/setuptools/command/__init__.py +++ b/ext/setuptools/command/__init__.py @@ -3,6 +3,7 @@ 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts', 'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib', + 'dist_info', ] from distutils.command.bdist import bdist diff --git a/ext/setuptools/command/bdist_egg.py b/ext/setuptools/command/bdist_egg.py index 51755d52c9..5fdb62d905 100644 --- a/ext/setuptools/command/bdist_egg.py +++ b/ext/setuptools/command/bdist_egg.py @@ -8,6 +8,7 @@ from types import CodeType import sys import os +import re import textwrap import marshal @@ -240,11 +241,26 @@ def zap_pyfiles(self): log.info("Removing .py files from temporary directory") for base, dirs, files in walk_egg(self.bdist_dir): for name in files: + path = os.path.join(base, name) + if name.endswith('.py'): - path = os.path.join(base, name) log.debug("Deleting %s", path) os.unlink(path) + if base.endswith('__pycache__'): + path_old = path + + pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc' + m = re.match(pattern, name) + path_new = os.path.join(base, os.pardir, m.group('name') + '.pyc') + log.info("Renaming file from [%s] to [%s]" % (path_old, path_new)) + try: + os.remove(path_new) + except OSError: + pass + os.rename(path_old, path_new) + + def zip_safe(self): safe = getattr(self.distribution, 'zip_safe', None) if safe is not None: diff --git a/ext/setuptools/command/develop.py b/ext/setuptools/command/develop.py index 85b23c6080..959c932a5c 100644 --- a/ext/setuptools/command/develop.py +++ b/ext/setuptools/command/develop.py @@ -95,7 +95,9 @@ def _resolve_setup_path(egg_base, install_dir, egg_path): path_to_setup = egg_base.replace(os.sep, '/').rstrip('/') if path_to_setup != os.curdir: path_to_setup = '../' * (path_to_setup.count('/') + 1) - resolved = normalize_path(os.path.join(install_dir, egg_path, path_to_setup)) + resolved = normalize_path( + os.path.join(install_dir, egg_path, path_to_setup) + ) if resolved != normalize_path(os.curdir): raise DistutilsOptionError( "Can't get a consistent path to setup script from" diff
--git a/ext/setuptools/command/dist_info.py b/ext/setuptools/command/dist_info.py new file mode 100644 index 0000000000..c45258fa03 --- /dev/null +++ b/ext/setuptools/command/dist_info.py @@ -0,0 +1,36 @@ +""" +Create a dist_info directory +As defined in the wheel specification +""" + +import os + +from distutils.core import Command +from distutils import log + + +class dist_info(Command): + + description = 'create a .dist-info directory' + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ] + + def initialize_options(self): + self.egg_base = None + + def finalize_options(self): + pass + + def run(self): + egg_info = self.get_finalized_command('egg_info') + egg_info.egg_base = self.egg_base + egg_info.finalize_options() + egg_info.run() + dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' + log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) + + bdist_wheel = self.get_finalized_command('bdist_wheel') + bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/ext/setuptools/command/easy_install.py b/ext/setuptools/command/easy_install.py index 8fba7b4115..b691e70268 100644 --- a/ext/setuptools/command/easy_install.py +++ b/ext/setuptools/command/easy_install.py @@ -53,6 +53,7 @@ PackageIndex, parse_requirement_arg, URL_SCHEME, ) from setuptools.command import bdist_egg, egg_info +from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, @@ -842,6 +843,8 @@ def install_eggs(self, spec, dist_filename, tmpdir): return [self.install_egg(dist_filename, tmpdir)] elif dist_filename.lower().endswith('.exe'): return [self.install_exe(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.whl'): + return [self.install_wheel(dist_filename, tmpdir)] # Anything else, try to extract and build setup_base = tmpdir @@ -1038,6 +1041,35 @@ def process(src, dst): f.write('\n'.join(locals()[name]) + '\n') f.close() + def install_wheel(self, wheel_path, tmpdir): + wheel = Wheel(wheel_path) + assert wheel.is_compatible() + destination = os.path.join(self.install_dir, wheel.egg_name()) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute( + os.unlink, + (destination,), + "Removing " + destination, + ) + try: + self.execute( + wheel.install_as_egg, + (destination,), + ("Installing %s to %s") % ( + os.path.basename(wheel_path), + os.path.dirname(destination) + ), + ) + finally: + update_dist_caches(destination, fix_zipimporter_caches=False) + self.add_output(destination) + return self.egg_distribution(destination) + __mv_warning = textwrap.dedent(""" Because this distribution was installed --multi-version, before you can import modules from this package in an application, you will need to @@ -1216,7 +1248,6 @@ def pf(src, dst): def byte_compile(self, to_compile): if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile @@ -1817,7 +1848,7 @@ def _update_zipimporter_cache(normalized_path, cache, updater=None): # get/del patterns instead. 
For more detailed information see the # following links: # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420 - # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99 + # http://bit.ly/2h9itJX old_entry = cache[p] del cache[p] new_entry = updater and updater(p, old_entry) diff --git a/ext/setuptools/command/egg_info.py b/ext/setuptools/command/egg_info.py index a183d15db7..befa09043a 100644 --- a/ext/setuptools/command/egg_info.py +++ b/ext/setuptools/command/egg_info.py @@ -160,9 +160,7 @@ def save_version_info(self, filename): build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds. """ - # python 2.6 compatibility - odict = getattr(collections, 'OrderedDict', dict) - egg_info = odict() + egg_info = collections.OrderedDict() # follow the order these keys would have been added # when PYTHONHASHSEED=0 egg_info['tag_build'] = self.tags() @@ -599,10 +597,7 @@ def write_pkg_info(cmd, basename, filename): metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name - metadata.long_description_content_type = getattr( - cmd.distribution, - 'long_description_content_type' - ) + try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it @@ -642,7 +637,7 @@ def write_requirements(cmd, basename, filename): def write_setup_requirements(cmd, basename, filename): - data = StringIO() + data = io.StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) diff --git a/ext/setuptools/command/sdist.py b/ext/setuptools/command/sdist.py index 508148e06d..bcfae4d82f 100644 --- a/ext/setuptools/command/sdist.py +++ b/ext/setuptools/command/sdist.py @@ -51,13 +51,6 @@ def run(self): for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) - # Call check_metadata only if no 'check' command - # (distutils <= 2.6) - import distutils.command - - if 'check' not in distutils.command.__all__: - self.check_metadata() - self.make_distribution() dist_files = getattr(self.distribution, 'dist_files', []) diff --git a/ext/setuptools/command/test.py b/ext/setuptools/command/test.py index 638d0c56e4..51aee1f7b1 100644 --- a/ext/setuptools/command/test.py +++ b/ext/setuptools/command/test.py @@ -3,6 +3,7 @@ import sys import contextlib import itertools +import unittest from distutils.errors import DistutilsError, DistutilsOptionError from distutils import log from unittest import TestLoader @@ -14,10 +15,14 @@ working_set, _namespace_packages, evaluate_marker, add_activation_listener, require, EntryPoint) from setuptools import Command -from setuptools.py31compat import unittest_main class ScanningLoader(TestLoader): + + def __init__(self): + TestLoader.__init__(self) + self._visited = set() + def loadTestsFromModule(self, module, pattern=None): """Return a suite of all tests cases contained in the given module @@ -25,6 +30,10 @@ def loadTestsFromModule(self, module, pattern=None): If the module has an ``additional_tests`` function, call it and add the return value to the tests. 
""" + if module in self._visited: + return None + self._visited.add(module) + tests = [] tests.append(TestLoader.loadTestsFromModule(self, module)) @@ -101,6 +110,8 @@ def test_args(self): return list(self._test_args()) def _test_args(self): + if not self.test_suite and sys.version_info >= (2, 7): + yield 'discover' if self.verbose: yield '--verbose' if self.test_suite: @@ -230,12 +241,11 @@ def run_tests(self): del_modules.append(name) list(map(sys.modules.__delitem__, del_modules)) - exit_kwarg = {} if sys.version_info < (2, 7) else {"exit": False} - test = unittest_main( + test = unittest.main( None, None, self._argv, testLoader=self._resolve_as_ep(self.test_loader), testRunner=self._resolve_as_ep(self.test_runner), - **exit_kwarg + exit=False, ) if not test.result.wasSuccessful(): msg = 'Test failed: %s' % test.result diff --git a/ext/setuptools/config.py b/ext/setuptools/config.py index 9a62e2ec55..a70794a42e 100644 --- a/ext/setuptools/config.py +++ b/ext/setuptools/config.py @@ -4,9 +4,9 @@ import sys from collections import defaultdict from functools import partial +from importlib import import_module from distutils.errors import DistutilsOptionError, DistutilsFileError -from setuptools.py26compat import import_module from setuptools.extern.six import string_types @@ -404,6 +404,7 @@ def parsers(self): """Metadata item name to parser function mapping.""" parse_list = self._parse_list parse_file = self._parse_file + parse_dict = self._parse_dict return { 'platforms': parse_list, @@ -416,6 +417,7 @@ def parsers(self): 'description': parse_file, 'long_description': parse_file, 'version': self._parse_version, + 'project_urls': parse_dict, } def _parse_version(self, value): diff --git a/ext/setuptools/dist.py b/ext/setuptools/dist.py index a2ca879516..c2bfdbc7c6 100644 --- a/ext/setuptools/dist.py +++ b/ext/setuptools/dist.py @@ -44,7 +44,7 @@ def write_pkg_file(self, file): self.classifiers or self.download_url): version = '1.1' # Setuptools specific for PEP 345 - if hasattr(self, 'python_requires'): + if hasattr(self, 'python_requires') or self.project_urls: version = '1.2' file.write('Metadata-Version: %s\n' % version) @@ -57,12 +57,11 @@ def write_pkg_file(self, file): file.write('License: %s\n' % self.get_license()) if self.download_url: file.write('Download-URL: %s\n' % self.download_url) + for project_url in self.project_urls.items(): + file.write('Project-URL: %s, %s\n' % project_url) - long_desc_content_type = getattr( - self, - 'long_description_content_type', - None - ) or 'UNKNOWN' + long_desc_content_type = \ + self.long_description_content_type or 'UNKNOWN' file.write('Description-Content-Type: %s\n' % long_desc_content_type) long_desc = rfc822_escape(self.get_long_description()) @@ -166,6 +165,8 @@ def check_requirements(dist, attr, value): """Verify that install_requires is a valid requirements list""" try: list(pkg_resources.parse_requirements(value)) + if isinstance(value, (dict, set)): + raise TypeError("Unordered types are not allowed") except (TypeError, ValueError) as error: tmpl = ( "{attr!r} must be a string or list of strings " @@ -316,26 +317,29 @@ def __init__(self, attrs=None): have_package_data = hasattr(self, "package_data") if not have_package_data: self.package_data = {} - _attrs_dict = attrs or {} - if 'features' in _attrs_dict or 'require_features' in _attrs_dict: + attrs = attrs or {} + if 'features' in attrs or 'require_features' in attrs: Feature.warn_deprecated() self.require_features = [] self.features = {} self.dist_files = [] - self.src_root = 
attrs and attrs.pop("src_root", None) + self.src_root = attrs.pop("src_root", None) self.patch_missing_pkg_info(attrs) - self.long_description_content_type = _attrs_dict.get( - 'long_description_content_type' - ) - # Make sure we have any eggs needed to interpret 'attrs' - if attrs is not None: - self.dependency_links = attrs.pop('dependency_links', []) - assert_string_list(self, 'dependency_links', self.dependency_links) - if attrs and 'setup_requires' in attrs: - self.fetch_build_eggs(attrs['setup_requires']) + self.project_urls = attrs.get('project_urls', {}) + self.dependency_links = attrs.pop('dependency_links', []) + self.setup_requires = attrs.pop('setup_requires', []) for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): vars(self).setdefault(ep.name, None) _Distribution.__init__(self, attrs) + + # The project_urls attribute may not be supported in distutils, so + # prime it here from our value if not automatically set + self.metadata.project_urls = getattr( + self.metadata, 'project_urls', self.project_urls) + self.metadata.long_description_content_type = attrs.get( + 'long_description_content_type' + ) + if isinstance(self.metadata.version, numbers.Number): # Some people apparently take "version number" too literally :) self.metadata.version = str(self.metadata.version) @@ -427,14 +431,15 @@ def _clean_req(self, req): req.marker = None return req - def parse_config_files(self, filenames=None): + def parse_config_files(self, filenames=None, ignore_option_errors=False): """Parses configuration files from various levels and loads configuration. """ _Distribution.parse_config_files(self, filenames=filenames) - parse_configuration(self, self.command_options) + parse_configuration(self, self.command_options, + ignore_option_errors=ignore_option_errors) self._finalize_requires() def parse_command_line(self): @@ -497,19 +502,20 @@ def fetch_build_egg(self, req): """Fetch an egg needed for building""" from setuptools.command.easy_install import easy_install dist = self.__class__({'script_args': ['easy_install']}) - dist.parse_config_files() opts = dist.get_option_dict('easy_install') - keep = ( - 'find_links', 'site_dirs', 'index_url', 'optimize', - 'site_dirs', 'allow_hosts' - ) - for key in list(opts): - if key not in keep: - del opts[key] # don't use any other settings + opts.clear() + opts.update( + (k, v) + for k, v in self.get_option_dict('easy_install').items() + if k in ( + # don't use any other settings + 'find_links', 'site_dirs', 'index_url', + 'optimize', 'site_dirs', 'allow_hosts', + )) if self.dependency_links: links = self.dependency_links[:] if 'find_links' in opts: - links = opts['find_links'][1].split() + links + links = opts['find_links'][1] + links opts['find_links'] = ('setup', links) install_dir = self.get_egg_cache_dir() cmd = easy_install( diff --git a/ext/setuptools/glibc.py b/ext/setuptools/glibc.py new file mode 100644 index 0000000000..a134591c30 --- /dev/null +++ b/ext/setuptools/glibc.py @@ -0,0 +1,86 @@ +# This file originally from pip: +# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py +from __future__ import absolute_import + +import ctypes +import re +import warnings + + +def glibc_version_string(): + "Returns glibc version string, or None if not using glibc." + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". 
This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing +def check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. + m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) + if not m: + warnings.warn("Expected glibc version with 2 components major.minor," + " got: %s" % version_str, RuntimeWarning) + return False + return (int(m.group("major")) == required_major and + int(m.group("minor")) >= minimum_minor) + + +def have_compatible_glibc(required_major, minimum_minor): + version_str = glibc_version_string() + if version_str is None: + return False + return check_glibc_version(version_str, required_major, minimum_minor) + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver(): + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails.
+ """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/ext/setuptools/monkey.py b/ext/setuptools/monkey.py index 6d3711ec1f..d9eb7d7b29 100644 --- a/ext/setuptools/monkey.py +++ b/ext/setuptools/monkey.py @@ -7,9 +7,9 @@ import platform import types import functools +from importlib import import_module import inspect -from .py26compat import import_module from setuptools.extern import six import setuptools diff --git a/ext/setuptools/package_index.py b/ext/setuptools/package_index.py index a6363b1856..ad7433078e 100644 --- a/ext/setuptools/package_index.py +++ b/ext/setuptools/package_index.py @@ -21,14 +21,14 @@ from pkg_resources import ( CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, Environment, find_distributions, safe_name, safe_version, - to_filename, Requirement, DEVELOP_DIST, + to_filename, Requirement, DEVELOP_DIST, EGG_DIST, ) from setuptools import ssl_support from distutils import log from distutils.errors import DistutilsError from fnmatch import translate -from setuptools.py26compat import strip_fragment from setuptools.py27compat import get_all_headers +from setuptools.wheel import Wheel EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) @@ -116,6 +116,17 @@ def distros_for_location(location, basename, metadata=None): if basename.endswith('.egg') and '-' in basename: # only one, unambiguous interpretation return [Distribution.from_location(location, basename, metadata)] + if basename.endswith('.whl') and '-' in basename: + wheel = Wheel(basename) + if not wheel.is_compatible(): + return [] + return [Distribution( + location=location, + project_name=wheel.project_name, + version=wheel.version, + # Increase priority over eggs. + precedence=EGG_DIST + 1, + )] if basename.endswith('.exe'): win_base, py_ver, platform = parse_bdist_wininst(basename) if win_base is not None: @@ -141,7 +152,7 @@ def distros_for_filename(filename, metadata=None): def interpret_distro_name( location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None - ): +): """Generate alternative interpretations of a source distro name Note: if `location` is a filesystem filename, you should call @@ -292,7 +303,7 @@ class PackageIndex(Environment): def __init__( self, index_url="https://pypi.python.org/simple", hosts=('*',), ca_bundle=None, verify_ssl=True, *args, **kw - ): + ): Environment.__init__(self, *args, **kw) self.index_url = index_url + "/" [:not index_url.endswith('/')] self.scanned_urls = {} @@ -346,7 +357,8 @@ def process_url(self, url, retrieve=False): base = f.url # handle redirects page = f.read() - if not isinstance(page, str): # We are in Python 3 and got bytes. We want str. + if not isinstance(page, str): + # In Python 3 and got bytes but want str. 
if isinstance(f, urllib.error.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' @@ -381,8 +393,9 @@ def url_ok(self, url, fatal=False): is_file = s and s.group(1).lower() == 'file' if is_file or self.allows(urllib.parse.urlparse(url)[1]): return True - msg = ("\nNote: Bypassing %s (disallowed host; see " - "http://bit.ly/1dg9ijs for details).\n") + msg = ( + "\nNote: Bypassing %s (disallowed host; see " + "http://bit.ly/2hrImnY for details).\n") if fatal: raise DistutilsError(msg % url) else: @@ -500,15 +513,16 @@ def check_hash(self, checker, filename, tfp): """ checker is a ContentChecker """ - checker.report(self.debug, + checker.report( + self.debug, "Validating %%s checksum for %s" % filename) if not checker.is_valid(): tfp.close() os.unlink(filename) raise DistutilsError( "%s validation failed for %s; " - "possible download problem?" % ( - checker.hash.name, os.path.basename(filename)) + "possible download problem?" + % (checker.hash.name, os.path.basename(filename)) ) def add_find_links(self, urls): @@ -536,7 +550,8 @@ def not_found_in_index(self, requirement): if self[requirement.key]: # we've seen at least one distro meth, msg = self.info, "Couldn't retrieve index page for %r" else: # no distros seen for this name, might be misspelled - meth, msg = (self.warn, + meth, msg = ( + self.warn, "Couldn't find index page for %r (maybe misspelled?)") meth(msg, requirement.unsafe_name) self.scan_all() @@ -577,8 +592,7 @@ def download(self, spec, tmpdir): def fetch_distribution( self, requirement, tmpdir, force_scan=False, source=False, - develop_ok=False, local_index=None - ): + develop_ok=False, local_index=None): """Obtain a distribution suitable for fulfilling `requirement` `requirement` must be a ``pkg_resources.Requirement`` instance. 
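Note on the package_index changes above: `.whl` files discovered on an index page are now classified through the new `Wheel` helper (added later in this patch as ext/setuptools/wheel.py) and registered with precedence just above eggs. A minimal sketch of what that helper does with a wheel filename; the filename used here is illustrative, not from the patch:

from setuptools.wheel import Wheel

# Parse a PEP 427 wheel filename into its tag components.
w = Wheel('requests-2.18.4-py2.py3-none-any.whl')
print(w.project_name, w.version)  # requests 2.18.4
print(list(w.tags()))             # [('py2', 'none', 'any'), ('py3', 'none', 'any')]
print(w.is_compatible())          # True wherever pep425tags reports a matching tag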
@@ -609,12 +623,19 @@ def find(req, env=None): if dist.precedence == DEVELOP_DIST and not develop_ok: if dist not in skipped: - self.warn("Skipping development or system egg: %s", dist) + self.warn( + "Skipping development or system egg: %s", dist, + ) skipped[dist] = 1 continue - if dist in req and (dist.precedence <= SOURCE_DIST or not source): - dist.download_location = self.download(dist.location, tmpdir) + test = ( + dist in req + and (dist.precedence <= SOURCE_DIST or not source) + ) + if test: + loc = self.download(dist.location, tmpdir) + dist.download_location = loc if os.path.exists(dist.download_location): return dist @@ -704,10 +725,10 @@ def gen_setup(self, filename, fragment, tmpdir): def _download_to(self, url, filename): self.info("Downloading %s", url) # Download the file - fp, info = None, None + fp = None try: checker = HashChecker.from_url(url) - fp = self.open_url(strip_fragment(url)) + fp = self.open_url(url) if isinstance(fp, urllib.error.HTTPError): raise DistutilsError( "Can't download %s: %s %s" % (url, fp.code, fp.msg) @@ -1103,7 +1124,8 @@ def local_open(url): f += '/' files.append('<a href="{name}">{name}</a>'.format(name=f)) else: - tmpl = ("<html><head><title>{url}</title>" + tmpl = ( + "<html><head><title>{url}</title>" "</head><body>{files}</body></html>") body = tmpl.format(url=url, files='\n'.join(files)) status, message = 200, "OK" diff --git a/ext/setuptools/pep425tags.py b/ext/setuptools/pep425tags.py new file mode 100644 index 0000000000..dfe55d587a --- /dev/null +++ b/ext/setuptools/pep425tags.py @@ -0,0 +1,316 @@ +# This file originally from pip: +# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py +"""Generate and work with PEP 425 Compatibility Tags.""" +from __future__ import absolute_import + +import distutils.util +import platform +import re +import sys +import sysconfig +import warnings +from collections import OrderedDict + +from . import glibc + +_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') + + +def get_config_var(var): + try: + return sysconfig.get_config_var(var) + except IOError as e: # Issue #1074 + warnings.warn("{}".format(e), RuntimeWarning) + return None + + +def get_abbr_impl(): + """Return abbreviated implementation name.""" + if hasattr(sys, 'pypy_version_info'): + pyimpl = 'pp' + elif sys.platform.startswith('java'): + pyimpl = 'jy' + elif sys.platform == 'cli': + pyimpl = 'ip' + else: + pyimpl = 'cp' + return pyimpl + + +def get_impl_ver(): + """Return implementation version.""" + impl_ver = get_config_var("py_version_nodot") + if not impl_ver or get_abbr_impl() == 'pp': + impl_ver = ''.join(map(str, get_impl_version_info())) + return impl_ver + + +def get_impl_version_info(): + """Return sys.version_info-like tuple for use in decrementing the minor + version.""" + if get_abbr_impl() == 'pp': + # as per https://github.com/pypa/pip/issues/2882 + return (sys.version_info[0], sys.pypy_version_info.major, + sys.pypy_version_info.minor) + else: + return sys.version_info[0], sys.version_info[1] + + +def get_impl_tag(): + """ + Returns the Tag for this specific implementation.
+ """ + return "{}{}".format(get_abbr_impl(), get_impl_ver()) + + +def get_flag(var, fallback, expected=True, warn=True): + """Use a fallback method for determining SOABI flags if the needed config + var is unset or unavailable.""" + val = get_config_var(var) + if val is None: + if warn: + warnings.warn("Config variable '{0}' is unset, Python ABI tag may " + "be incorrect".format(var), RuntimeWarning, 2) + return fallback() + return val == expected + + +def get_abi_tag(): + """Return the ABI tag based on SOABI (if available) or emulate SOABI + (CPython 2, PyPy).""" + soabi = get_config_var('SOABI') + impl = get_abbr_impl() + if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): + d = '' + m = '' + u = '' + if get_flag('Py_DEBUG', + lambda: hasattr(sys, 'gettotalrefcount'), + warn=(impl == 'cp')): + d = 'd' + if get_flag('WITH_PYMALLOC', + lambda: impl == 'cp', + warn=(impl == 'cp')): + m = 'm' + if get_flag('Py_UNICODE_SIZE', + lambda: sys.maxunicode == 0x10ffff, + expected=4, + warn=(impl == 'cp' and + sys.version_info < (3, 3))) \ + and sys.version_info < (3, 3): + u = 'u' + abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) + elif soabi and soabi.startswith('cpython-'): + abi = 'cp' + soabi.split('-')[1] + elif soabi: + abi = soabi.replace('.', '_').replace('-', '_') + else: + abi = None + return abi + + +def _is_running_32bit(): + return sys.maxsize == 2147483647 + + +def get_platform(): + """Return our platform name 'win32', 'linux_x86_64'""" + if sys.platform == 'darwin': + # distutils.util.get_platform() returns the release based on the value + # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may + # be significantly older than the user's current machine. + release, _, machine = platform.mac_ver() + split_ver = release.split('.') + + if machine == "x86_64" and _is_running_32bit(): + machine = "i386" + elif machine == "ppc64" and _is_running_32bit(): + machine = "ppc" + + return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) + + # XXX remove distutils dependency + result = distutils.util.get_platform().replace('.', '_').replace('-', '_') + if result == "linux_x86_64" and _is_running_32bit(): + # 32 bit Python program (running on a 64 bit Linux): pip should only + # install and run 32 bit compiled extensions in that case. + result = "linux_i686" + + return result + + +def is_manylinux1_compatible(): + # Only Linux, and only x86-64 / i686 + if get_platform() not in {"linux_x86_64", "linux_i686"}: + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux1_compatible) + except (ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 5 uses glibc 2.5. + return glibc.have_compatible_glibc(2, 5) + + +def get_darwin_arches(major, minor, machine): + """Return a list of supported arches (including group arches) for + the given major, minor and machine architecture of an macOS machine. + """ + arches = [] + + def _supports_arch(major, minor, arch): + # Looking at the application support for macOS versions in the chart + # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears + # our timeline looks roughly like: + # + # 10.0 - Introduces ppc support. + # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 + # and x86_64 support is CLI only, and cannot be used for GUI + # applications. + # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. 
+ # 10.6 - Drops support for ppc64 + # 10.7 - Drops support for ppc + # + # Given that we do not know if we're installing a CLI or a GUI + # application, we must be conservative and assume it might be a GUI + # application and behave as if ppc64 and x86_64 support did not occur + # until 10.5. + # + # Note: The above information is taken from the "Application support" + # column in the chart not the "Processor support" since I believe + # that we care about what instruction sets an application can use + # not which processors the OS supports. + if arch == 'ppc': + return (major, minor) <= (10, 5) + if arch == 'ppc64': + return (major, minor) == (10, 5) + if arch == 'i386': + return (major, minor) >= (10, 4) + if arch == 'x86_64': + return (major, minor) >= (10, 5) + if arch in groups: + for garch in groups[arch]: + if _supports_arch(major, minor, garch): + return True + return False + + groups = OrderedDict([ + ("fat", ("i386", "ppc")), + ("intel", ("x86_64", "i386")), + ("fat64", ("x86_64", "ppc64")), + ("fat32", ("x86_64", "i386", "ppc")), + ]) + + if _supports_arch(major, minor, machine): + arches.append(machine) + + for garch in groups: + if machine in groups[garch] and _supports_arch(major, minor, garch): + arches.append(garch) + + arches.append('universal') + + return arches + + +def get_supported(versions=None, noarch=False, platform=None, + impl=None, abi=None): + """Return a list of supported tags for each version specified in + `versions`. + + :param versions: a list of string versions, of the form ["33", "32"], + or None. The first version will be assumed to support our ABI. + :param platform: specify the exact platform you want valid + tags for, or None. If None, use the local system platform. + :param impl: specify the exact implementation you want valid + tags for, or None. If None, use the local interpreter impl. + :param abi: specify the exact abi you want valid + tags for, or None. If None, use the local interpreter abi. + """ + supported = [] + + # Versions must be given with respect to the preference + if versions is None: + versions = [] + version_info = get_impl_version_info() + major = version_info[:-1] + # Support all previous minor Python versions. + for minor in range(version_info[-1], -1, -1): + versions.append(''.join(map(str, major + (minor,)))) + + impl = impl or get_abbr_impl() + + abis = [] + + abi = abi or get_abi_tag() + if abi: + abis[0:0] = [abi] + + abi3s = set() + import imp + for suffix in imp.get_suffixes(): + if suffix[0].startswith('.abi'): + abi3s.add(suffix[0].split('.', 2)[1]) + + abis.extend(sorted(list(abi3s))) + + abis.append('none') + + if not noarch: + arch = platform or get_platform() + if arch.startswith('macosx'): + # support macosx-10.6-intel on macosx-10.9-x86_64 + match = _osx_arch_pat.match(arch) + if match: + name, major, minor, actual_arch = match.groups() + tpl = '{}_{}_%i_%s'.format(name, major) + arches = [] + for m in reversed(range(int(minor) + 1)): + for a in get_darwin_arches(int(major), m, actual_arch): + arches.append(tpl % (m, a)) + else: + # arch pattern didn't match (?!) 
+ arches = [arch] + elif platform is None and is_manylinux1_compatible(): + arches = [arch.replace('linux', 'manylinux1'), arch] + else: + arches = [arch] + + # Current version, current API (built specifically for our Python): + for abi in abis: + for arch in arches: + supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + + # abi3 modules compatible with older version of Python + for version in versions[1:]: + # abi3 was introduced in Python 3.2 + if version in {'31', '30'}: + break + for abi in abi3s: # empty set if not Python 3 + for arch in arches: + supported.append(("%s%s" % (impl, version), abi, arch)) + + # Has binaries, does not use the Python API: + for arch in arches: + supported.append(('py%s' % (versions[0][0]), 'none', arch)) + + # No abi / arch, but requires our implementation: + supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) + # Tagged specifically as being cross-version compatible + # (with just the major version specified) + supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) + + # No abi / arch, generic Python + for i, version in enumerate(versions): + supported.append(('py%s' % (version,), 'none', 'any')) + if i == 0: + supported.append(('py%s' % (version[0]), 'none', 'any')) + + return supported + + +implementation_tag = get_impl_tag() diff --git a/ext/setuptools/py26compat.py b/ext/setuptools/py26compat.py deleted file mode 100644 index 4d3add8ca8..0000000000 --- a/ext/setuptools/py26compat.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Compatibility Support for Python 2.6 and earlier -""" - -import sys - -try: - from urllib.parse import splittag -except ImportError: - from urllib import splittag - - -def strip_fragment(url): - """ - In `Python 8280 <http://bugs.python.org/issue8280>`_, Python 2.7 and - later was patched to disregard the fragment when making URL requests. - Do the same for Python 2.6 and earlier. - """ - url, fragment = splittag(url) - return url - - -if sys.version_info >= (2, 7): - strip_fragment = lambda x: x - -try: - from importlib import import_module -except ImportError: - - def import_module(module_name): - return __import__(module_name, fromlist=['__name__']) diff --git a/ext/setuptools/py31compat.py b/ext/setuptools/py31compat.py index 44b025d4b2..4ea953201f 100644 --- a/ext/setuptools/py31compat.py +++ b/ext/setuptools/py31compat.py @@ -1,6 +1,3 @@ -import sys -import unittest - __all__ = ['get_config_vars', 'get_path'] try: @@ -42,15 +39,3 @@ def __exit__(self, exctype, excvalue, exctrace): except OSError: # removal errors are not the only possible pass self.name = None - - -unittest_main = unittest.main - -_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2) -if _PY31: - # on Python 3.1, translate testRunner==None to TextTestRunner - # for compatibility with Python 2.6, 2.7, and 3.2+ - def unittest_main(*args, **kwargs): - if 'testRunner' in kwargs and kwargs['testRunner'] is None: - kwargs['testRunner'] = unittest.TextTestRunner - return unittest.main(*args, **kwargs) diff --git a/ext/setuptools/sandbox.py b/ext/setuptools/sandbox.py index 1d981f497f..685f3f72e3 100644 --- a/ext/setuptools/sandbox.py +++ b/ext/setuptools/sandbox.py @@ -39,10 +39,6 @@ def _execfile(filename, globals, locals=None): mode = 'rb' with open(filename, mode) as stream: script = stream.read() - # compile() function in Python 2.6 and 3.1 requires LF line endings.
- if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2): - script = script.replace(b'\r\n', b'\n') - script = script.replace(b'\r', b'\n') if locals is None: locals = globals code = compile(script, filename, 'exec') diff --git a/ext/setuptools/ssl_support.py b/ext/setuptools/ssl_support.py index 72b18ef266..6362f1f426 100644 --- a/ext/setuptools/ssl_support.py +++ b/ext/setuptools/ssl_support.py @@ -186,9 +186,14 @@ def connect(self): else: actual_host = self.host - self.sock = ssl.wrap_socket( - sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle - ) + if hasattr(ssl, 'create_default_context'): + ctx = ssl.create_default_context(cafile=self.ca_bundle) + self.sock = ctx.wrap_socket(sock, server_hostname=actual_host) + else: + # Fallback for Python < 2.7.9 and < 3.4, which lack + # ssl.create_default_context. + self.sock = ssl.wrap_socket( + sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle + ) try: match_hostname(self.sock.getpeercert(), actual_host) except CertificateError: diff --git a/ext/setuptools/wheel.py b/ext/setuptools/wheel.py new file mode 100644 index 0000000000..9ffe434ae3 --- /dev/null +++ b/ext/setuptools/wheel.py @@ -0,0 +1,163 @@ +'''Wheels support.''' + +from distutils.util import get_platform +import email +import itertools +import os +import re +import zipfile + +from pkg_resources import Distribution, PathMetadata, parse_version +from pkg_resources.extern.six import PY3 +from setuptools import Distribution as SetuptoolsDistribution +from setuptools import pep425tags +from setuptools.command.egg_info import write_requirements + + +WHEEL_NAME = re.compile( + r"""^(?P<project_name>.+?)-(?P<version>\d.*?) + ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?) + )\.whl$""", +re.VERBOSE).match + +NAMESPACE_PACKAGE_INIT = '''\ +try: + __import__('pkg_resources').declare_namespace(__name__) +except ImportError: + __path__ = __import__('pkgutil').extend_path(__path__, __name__) +''' + + +def unpack(src_dir, dst_dir): + '''Move everything under `src_dir` to `dst_dir`, and delete the former.''' + for dirpath, dirnames, filenames in os.walk(src_dir): + subdir = os.path.relpath(dirpath, src_dir) + for f in filenames: + src = os.path.join(dirpath, f) + dst = os.path.join(dst_dir, subdir, f) + os.renames(src, dst) + for n, d in reversed(list(enumerate(dirnames))): + src = os.path.join(dirpath, d) + dst = os.path.join(dst_dir, subdir, d) + if not os.path.exists(dst): + # Directory does not exist in destination, + # rename it and prune it from os.walk list. + os.renames(src, dst) + del dirnames[n] + # Cleanup.
+    for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
+        assert not filenames
+        os.rmdir(dirpath)
+
+
+class Wheel(object):
+
+    def __init__(self, filename):
+        match = WHEEL_NAME(os.path.basename(filename))
+        if match is None:
+            raise ValueError('invalid wheel name: %r' % filename)
+        self.filename = filename
+        for k, v in match.groupdict().items():
+            setattr(self, k, v)
+
+    def tags(self):
+        '''List tags (py_version, abi, platform) supported by this wheel.'''
+        return itertools.product(self.py_version.split('.'),
+                                 self.abi.split('.'),
+                                 self.platform.split('.'))
+
+    def is_compatible(self):
+        '''Is the wheel compatible with the current platform?'''
+        supported_tags = pep425tags.get_supported()
+        return next((True for t in self.tags() if t in supported_tags), False)
+
+    def egg_name(self):
+        return Distribution(
+            project_name=self.project_name, version=self.version,
+            platform=(None if self.platform == 'any' else get_platform()),
+        ).egg_name() + '.egg'
+
+    def install_as_egg(self, destination_eggdir):
+        '''Install wheel as an egg directory.'''
+        with zipfile.ZipFile(self.filename) as zf:
+            dist_basename = '%s-%s' % (self.project_name, self.version)
+            dist_info = '%s.dist-info' % dist_basename
+            dist_data = '%s.data' % dist_basename
+            def get_metadata(name):
+                with zf.open('%s/%s' % (dist_info, name)) as fp:
+                    value = fp.read().decode('utf-8') if PY3 else fp.read()
+                    return email.parser.Parser().parsestr(value)
+            wheel_metadata = get_metadata('WHEEL')
+            dist_metadata = get_metadata('METADATA')
+            # Check wheel format version is supported.
+            wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
+            if not parse_version('1.0') <= wheel_version < parse_version('2.0dev0'):
+                raise ValueError('unsupported wheel format version: %s' % wheel_version)
+            # Extract to target directory.
+            os.mkdir(destination_eggdir)
+            zf.extractall(destination_eggdir)
+            # Convert metadata.
+            dist_info = os.path.join(destination_eggdir, dist_info)
+            dist = Distribution.from_location(
+                destination_eggdir, dist_info,
+                metadata=PathMetadata(destination_eggdir, dist_info)
+            )
+            # Note: we need to evaluate and strip markers now,
+            # as we can't easily convert back from the syntax:
+            # foobar; "linux" in sys_platform and extra == 'test'
+            def raw_req(req):
+                req.marker = None
+                return str(req)
+            install_requires = list(sorted(map(raw_req, dist.requires())))
+            extras_require = {
+                extra: list(sorted(
+                    req
+                    for req in map(raw_req, dist.requires((extra,)))
+                    if req not in install_requires
+                ))
+                for extra in dist.extras
+            }
+            egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
+            os.rename(dist_info, egg_info)
+            os.rename(os.path.join(egg_info, 'METADATA'),
+                      os.path.join(egg_info, 'PKG-INFO'))
+            setup_dist = SetuptoolsDistribution(attrs=dict(
+                install_requires=install_requires,
+                extras_require=extras_require,
+            ))
+            write_requirements(setup_dist.get_command_obj('egg_info'),
+                               None, os.path.join(egg_info, 'requires.txt'))
+            # Move data entries to their correct location.
+            dist_data = os.path.join(destination_eggdir, dist_data)
+            dist_data_scripts = os.path.join(dist_data, 'scripts')
+            if os.path.exists(dist_data_scripts):
+                egg_info_scripts = os.path.join(destination_eggdir,
+                                                'EGG-INFO', 'scripts')
+                os.mkdir(egg_info_scripts)
+                for entry in os.listdir(dist_data_scripts):
+                    # Remove bytecode, as it's not properly handled
+                    # during easy_install scripts install phase.
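# An illustrative sketch (not taken from the patch) of driving the Wheel
# helper above by hand, assuming a setuptools recent enough to ship
# setuptools.wheel; the filename is just an example of a pure-Python wheel.
from setuptools.wheel import Wheel

w = Wheel('six-1.11.0-py2.py3-none-any.whl')
print('%s %s' % (w.project_name, w.version))  # six 1.11.0
# tags() expands the dotted tag fields into (py_version, abi, platform)
# triples: here ('py2', 'none', 'any') and ('py3', 'none', 'any').
print(list(w.tags()))
# is_compatible() intersects those triples with pep425tags.get_supported(),
# so a pure-Python wheel like this one is accepted on any supported Python.
print(w.is_compatible())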
+ if entry.endswith('.pyc'): + os.unlink(os.path.join(dist_data_scripts, entry)) + else: + os.rename(os.path.join(dist_data_scripts, entry), + os.path.join(egg_info_scripts, entry)) + os.rmdir(dist_data_scripts) + for subdir in filter(os.path.exists, ( + os.path.join(dist_data, d) + for d in ('data', 'headers', 'purelib', 'platlib') + )): + unpack(subdir, destination_eggdir) + if os.path.exists(dist_data): + os.rmdir(dist_data) + # Fix namespace packages. + namespace_packages = os.path.join(egg_info, 'namespace_packages.txt') + if os.path.exists(namespace_packages): + with open(namespace_packages) as fp: + namespace_packages = fp.read().split() + for mod in namespace_packages: + mod_dir = os.path.join(destination_eggdir, *mod.split('.')) + mod_init = os.path.join(mod_dir, '__init__.py') + if os.path.exists(mod_dir) and not os.path.exists(mod_init): + with open(mod_init, 'w') as fp: + fp.write(NAMESPACE_PACKAGE_INIT) diff --git a/ext/tzlocal/__init__.py b/ext/tzlocal/__init__.py index df7a66b912..c8196d66d9 100644 --- a/ext/tzlocal/__init__.py +++ b/ext/tzlocal/__init__.py @@ -1,7 +1,5 @@ import sys if sys.platform == 'win32': from tzlocal.win32 import get_localzone, reload_localzone -elif 'darwin' in sys.platform: - from tzlocal.darwin import get_localzone, reload_localzone else: from tzlocal.unix import get_localzone, reload_localzone diff --git a/ext/tzlocal/darwin.py b/ext/tzlocal/darwin.py deleted file mode 100644 index 4e8540bc08..0000000000 --- a/ext/tzlocal/darwin.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import with_statement -import os -import pytz -import subprocess -import sys - -_cache_tz = None - -if sys.version_info[0] == 2: - - class Popen(subprocess.Popen): - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if self.stdout: - self.stdout.close() - if self.stderr: - self.stderr.close() - if self.stdin: - self.stdin.close() - # Wait for the process to terminate, to avoid zombies. - self.wait() - -else: - from subprocess import Popen - - -def _get_localzone(_root='/'): - with Popen( - "systemsetup -gettimezone", - shell=True, - stderr=subprocess.PIPE, - stdout=subprocess.PIPE - ) as pipe: - tzname = pipe.stdout.read().replace(b'Time Zone: ', b'').strip() - - if not tzname or tzname not in pytz.all_timezones_set: - # link will be something like /usr/share/zoneinfo/America/Los_Angeles. - link = os.readlink(os.path.join(_root, "etc/localtime")) - tzname = link[link.rfind("zoneinfo/") + 9:] - - return pytz.timezone(tzname) - - -def get_localzone(): - """Get the computers configured local timezone, if any.""" - global _cache_tz - if _cache_tz is None: - _cache_tz = _get_localzone() - return _cache_tz - - -def reload_localzone(): - """Reload the cached localzone. You need to call this if the timezone has changed.""" - global _cache_tz - _cache_tz = _get_localzone() - return _cache_tz diff --git a/ext/tzlocal/test_data/timezone/etc/timezone b/ext/tzlocal/test_data/timezone/etc/timezone index 28b3372d20..e7003c4452 100644 --- a/ext/tzlocal/test_data/timezone/etc/timezone +++ b/ext/tzlocal/test_data/timezone/etc/timezone @@ -1 +1 @@ -Africa/Harare +Africa/Harare# We allow comments. 
It's unusual, but has happened diff --git a/ext/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo b/ext/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo index 28b3372d20..028ec1d7ac 100644 --- a/ext/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo +++ b/ext/tzlocal/test_data/vardbzoneinfo/var/db/zoneinfo @@ -1 +1 @@ -Africa/Harare +Africa/Harare localhost # The host is not a part of the format, but is allowed. diff --git a/ext/tzlocal/tests.py b/ext/tzlocal/tests.py index 3528b915f1..ace7ffe63b 100644 --- a/ext/tzlocal/tests.py +++ b/ext/tzlocal/tests.py @@ -1,10 +1,12 @@ -import sys +import mock import os -from datetime import datetime -import unittest import pytz +import sys import tzlocal.unix -import tzlocal.darwin +import unittest + +from datetime import datetime + class TzLocalTests(unittest.TestCase): def setUp(self): @@ -32,6 +34,16 @@ def test_env(self): # Non-zoneinfo timezones are not supported in the TZ environment. self.assertRaises(pytz.UnknownTimeZoneError, tzlocal.unix._tz_from_env, 'GMT+03:00') + # Test the _try function + os.environ['TZ'] = 'Africa/Harare' + tz_harare = tzlocal.unix._try_tz_from_env() + self.assertEqual(tz_harare.zone, 'Africa/Harare') + # With a zone that doesn't exist + os.environ['TZ'] = 'Just Nonsense' + tz_harare = tzlocal.unix._try_tz_from_env() + self.assertIsNone(tz_harare) + + def test_timezone(self): # Most versions of Ubuntu @@ -63,18 +75,26 @@ def test_vardbzoneinfo_setting(self): self.assertEqual(tz.zone, 'Africa/Harare') def test_only_localtime(self): - tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data', 'localtime')) self.assertEqual(tz.zone, 'local') dt = datetime(2012, 1, 1, 5) self.assertEqual(pytz.timezone('Africa/Harare').localize(dt), tz.localize(dt)) - def test_darwin(self): - # Basic test of OS X code. The systemcall will fail if this is not OS X - # And a symlink will be used. This means that on OS X, this test will - # actually fail! No matter, this exersizes the code. - tz = tzlocal.darwin._get_localzone(_root=os.path.join(self.path, 'test_data', 'symlink_localtime')) - self.assertEqual(tz.zone, 'Africa/Harare') + def test_get_reload(self): + os.environ['TZ'] = 'Africa/Harare' + tz_harare = tzlocal.unix.get_localzone() + self.assertEqual(tz_harare.zone, 'Africa/Harare') + # Changing the TZ makes no difference, because it's cached + os.environ['TZ'] = 'Africa/Johannesburg' + tz_harare = tzlocal.unix.get_localzone() + self.assertEqual(tz_harare.zone, 'Africa/Harare') + # So we reload it + tz_harare = tzlocal.unix.reload_localzone() + self.assertEqual(tz_harare.zone, 'Africa/Johannesburg') + + def test_fail(self): + with self.assertRaises(pytz.exceptions.UnknownTimeZoneError): + tz = tzlocal.unix._get_localzone(_root=os.path.join(self.path, 'test_data')) if sys.platform == 'win32': @@ -84,5 +104,31 @@ class TzWin32Tests(unittest.TestCase): def test_win32(self): tzlocal.win32.get_localzone() +else: + + class TzWin32Tests(unittest.TestCase): + + def test_win32_on_unix(self): + # Yes, winreg is all mocked out, but this test means we at least + # catch syntax errors, etc. 
+ winreg = mock.MagicMock() + winreg.OpenKey = mock.MagicMock() + winreg.OpenKey.close = mock.MagicMock() + winreg.QueryInfoKey = mock.MagicMock(return_value=(1, 1)) + winreg.EnumValue = mock.MagicMock( + return_value=('TimeZoneKeyName','Belarus Standard Time')) + winreg.EnumKey = mock.Mock(return_value='Bahia Standard Time') + sys.modules['winreg'] = winreg + import tzlocal.win32 + tz = tzlocal.win32.get_localzone() + self.assertEqual(tz.zone, 'Europe/Minsk') + + tzlocal.win32.valuestodict = mock.Mock(return_value={ + 'StandardName': 'Mocked Standard Time', + 'Std': 'Mocked Standard Time', + }) + tz = tzlocal.win32.reload_localzone() + self.assertEqual(tz.zone, 'America/Bahia') + if __name__ == '__main__': unittest.main() diff --git a/ext/tzlocal/unix.py b/ext/tzlocal/unix.py index 0d3f4e888e..9f7a706f8e 100644 --- a/ext/tzlocal/unix.py +++ b/ext/tzlocal/unix.py @@ -1,4 +1,3 @@ -from __future__ import with_statement import os import re import pytz @@ -24,6 +23,16 @@ def _tz_from_env(tzenv): "tzlocal() does not support non-zoneinfo timezones like %s. \n" "Please use a timezone in the form of Continent/City") + +def _try_tz_from_env(): + tzenv = os.environ.get('TZ') + if tzenv: + try: + return _tz_from_env(tzenv) + except pytz.UnknownTimeZoneError: + pass + + def _get_localzone(_root='/'): """Tries to find the local timezone configuration. @@ -35,12 +44,9 @@ def _get_localzone(_root='/'): beneath the _root directory. This is primarily used by the tests. In normal usage you call the function without parameters.""" - tzenv = os.environ.get('TZ') + tzenv = _try_tz_from_env() if tzenv: - try: - return _tz_from_env(tzenv) - except pytz.UnknownTimeZoneError: - pass + return tzenv # Now look for distribution specific configuration files # that contain the timezone name. @@ -52,14 +58,16 @@ def _get_localzone(_root='/'): # Issue #3 was that /etc/timezone was a zoneinfo file. # That's a misconfiguration, but we need to handle it gracefully: - if data[:5] != 'TZif2': - etctz = data.strip().decode() - # Get rid of host definitions and comments: - if ' ' in etctz: - etctz, dummy = etctz.split(' ', 1) - if '#' in etctz: - etctz, dummy = etctz.split('#', 1) - return pytz.timezone(etctz.replace(' ', '_')) + if data[:5] == 'TZif2': + continue + + etctz = data.strip().decode() + # Get rid of host definitions and comments: + if ' ' in etctz: + etctz, dummy = etctz.split(' ', 1) + if '#' in etctz: + etctz, dummy = etctz.split('#', 1) + return pytz.timezone(etctz.replace(' ', '_')) # CentOS has a ZONE setting in /etc/sysconfig/clock, # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and diff --git a/ext/tzlocal/win32.py b/ext/tzlocal/win32.py index 63445cd782..86dd99fc0b 100644 --- a/ext/tzlocal/win32.py +++ b/ext/tzlocal/win32.py @@ -29,6 +29,7 @@ def get_localzone_name(): localtz = winreg.OpenKey(handle, TZLOCALKEYNAME) keyvalues = valuestodict(localtz) localtz.Close() + if 'TimeZoneKeyName' in keyvalues: # Windows 7 (and Vista?) @@ -91,3 +92,4 @@ def reload_localzone(): """Reload the cached localzone. You need to call this if the timezone has changed.""" global _cache_tz _cache_tz = pytz.timezone(get_localzone_name()) + return _cache_tz diff --git a/ext/tzlocal/windows_tz.py b/ext/tzlocal/windows_tz.py index de89c85676..123980b80b 100644 --- a/ext/tzlocal/windows_tz.py +++ b/ext/tzlocal/windows_tz.py @@ -1,4 +1,4 @@ -# This file is autogenerated by the get_windows_info.py script +# This file is autogenerated by the update_windows_mapping.py script # Do not edit. 
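# An illustrative sketch (not taken from the patch): the tables below
# translate between Windows registry time zone names and IANA names.
# 'Magallanes Standard Time' is one of the entries this update introduces;
# tz_win is assumed to be the reverse table generated further down in this
# module, as in upstream tzlocal.
from tzlocal.windows_tz import win_tz, tz_win

print(win_tz['Magallanes Standard Time'])  # America/Punta_Arenas
print(tz_win['America/Punta_Arenas'])      # Magallanes Standard Time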
win_tz = {'AUS Central Standard Time': 'Australia/Darwin', 'AUS Eastern Standard Time': 'Australia/Sydney', @@ -63,6 +63,7 @@ 'Line Islands Standard Time': 'Pacific/Kiritimati', 'Lord Howe Standard Time': 'Australia/Lord_Howe', 'Magadan Standard Time': 'Asia/Magadan', + 'Magallanes Standard Time': 'America/Punta_Arenas', 'Marquesas Standard Time': 'Pacific/Marquesas', 'Mauritius Standard Time': 'Indian/Mauritius', 'Middle East Standard Time': 'Asia/Beirut', @@ -98,6 +99,7 @@ 'Saint Pierre Standard Time': 'America/Miquelon', 'Sakhalin Standard Time': 'Asia/Sakhalin', 'Samoa Standard Time': 'Pacific/Apia', + 'Saratov Standard Time': 'Europe/Saratov', 'Singapore Standard Time': 'Asia/Singapore', 'South Africa Standard Time': 'Africa/Johannesburg', 'Sri Lanka Standard Time': 'Asia/Colombo', @@ -115,6 +117,7 @@ 'US Mountain Standard Time': 'America/Phoenix', 'UTC': 'Etc/GMT', 'UTC+12': 'Etc/GMT-12', + 'UTC+13': 'Etc/GMT-13', 'UTC-02': 'Etc/GMT+2', 'UTC-08': 'Etc/GMT+8', 'UTC-09': 'Etc/GMT+9', @@ -183,6 +186,7 @@ 'Africa/Ouagadougou': 'Greenwich Standard Time', 'Africa/Porto-Novo': 'W. Central Africa Standard Time', 'Africa/Sao_Tome': 'Greenwich Standard Time', + 'Africa/Timbuktu': 'Greenwich Standard Time', 'Africa/Tripoli': 'Libya Standard Time', 'Africa/Tunis': 'W. Central Africa Standard Time', 'Africa/Windhoek': 'Namibia Standard Time', @@ -200,6 +204,7 @@ 'America/Argentina/Ushuaia': 'Argentina Standard Time', 'America/Aruba': 'SA Western Standard Time', 'America/Asuncion': 'Paraguay Standard Time', + 'America/Atka': 'Aleutian Standard Time', 'America/Bahia': 'Bahia Standard Time', 'America/Bahia_Banderas': 'Central Standard Time (Mexico)', 'America/Barbados': 'SA Western Standard Time', @@ -234,6 +239,7 @@ 'America/Edmonton': 'Mountain Standard Time', 'America/Eirunepe': 'SA Pacific Standard Time', 'America/El_Salvador': 'Central America Standard Time', + 'America/Ensenada': 'Pacific Standard Time (Mexico)', 'America/Fort_Nelson': 'US Mountain Standard Time', 'America/Fortaleza': 'SA Eastern Standard Time', 'America/Glace_Bay': 'Atlantic Standard Time', @@ -262,6 +268,7 @@ 'America/Jujuy': 'Argentina Standard Time', 'America/Juneau': 'Alaskan Standard Time', 'America/Kentucky/Monticello': 'Eastern Standard Time', + 'America/Knox_IN': 'Central Standard Time', 'America/Kralendijk': 'SA Western Standard Time', 'America/La_Paz': 'SA Western Standard Time', 'America/Lima': 'SA Pacific Standard Time', @@ -301,9 +308,10 @@ 'America/Phoenix': 'US Mountain Standard Time', 'America/Port-au-Prince': 'Haiti Standard Time', 'America/Port_of_Spain': 'SA Western Standard Time', + 'America/Porto_Acre': 'SA Pacific Standard Time', 'America/Porto_Velho': 'SA Western Standard Time', 'America/Puerto_Rico': 'SA Western Standard Time', - 'America/Punta_Arenas': 'SA Eastern Standard Time', + 'America/Punta_Arenas': 'Magallanes Standard Time', 'America/Rainy_River': 'Central Standard Time', 'America/Rankin_Inlet': 'Central Standard Time', 'America/Recife': 'SA Eastern Standard Time', @@ -316,6 +324,7 @@ 'America/Santo_Domingo': 'SA Western Standard Time', 'America/Sao_Paulo': 'E. 
South America Standard Time', 'America/Scoresbysund': 'Azores Standard Time', + 'America/Shiprock': 'Mountain Standard Time', 'America/Sitka': 'Alaskan Standard Time', 'America/St_Barthelemy': 'SA Western Standard Time', 'America/St_Johns': 'Newfoundland Standard Time', @@ -331,6 +340,7 @@ 'America/Toronto': 'Eastern Standard Time', 'America/Tortola': 'SA Western Standard Time', 'America/Vancouver': 'Pacific Standard Time', + 'America/Virgin': 'SA Western Standard Time', 'America/Whitehorse': 'Pacific Standard Time', 'America/Winnipeg': 'Central Standard Time', 'America/Yakutat': 'Alaskan Standard Time', @@ -341,8 +351,9 @@ 'Antarctica/Macquarie': 'Central Pacific Standard Time', 'Antarctica/Mawson': 'West Asia Standard Time', 'Antarctica/McMurdo': 'New Zealand Standard Time', - 'Antarctica/Palmer': 'SA Eastern Standard Time', + 'Antarctica/Palmer': 'Magallanes Standard Time', 'Antarctica/Rothera': 'SA Eastern Standard Time', + 'Antarctica/South_Pole': 'New Zealand Standard Time', 'Antarctica/Syowa': 'E. Africa Standard Time', 'Antarctica/Vostok': 'Central Asia Standard Time', 'Arctic/Longyearbyen': 'W. Europe Standard Time', @@ -353,6 +364,7 @@ 'Asia/Aqtau': 'West Asia Standard Time', 'Asia/Aqtobe': 'West Asia Standard Time', 'Asia/Ashgabat': 'West Asia Standard Time', + 'Asia/Ashkhabad': 'West Asia Standard Time', 'Asia/Atyrau': 'West Asia Standard Time', 'Asia/Baghdad': 'Arabic Standard Time', 'Asia/Bahrain': 'Arab Standard Time', @@ -365,7 +377,10 @@ 'Asia/Calcutta': 'India Standard Time', 'Asia/Chita': 'Transbaikal Standard Time', 'Asia/Choibalsan': 'Ulaanbaatar Standard Time', + 'Asia/Chongqing': 'China Standard Time', + 'Asia/Chungking': 'China Standard Time', 'Asia/Colombo': 'Sri Lanka Standard Time', + 'Asia/Dacca': 'Bangladesh Standard Time', 'Asia/Damascus': 'Syria Standard Time', 'Asia/Dhaka': 'Bangladesh Standard Time', 'Asia/Dili': 'Tokyo Standard Time', @@ -373,6 +388,7 @@ 'Asia/Dushanbe': 'West Asia Standard Time', 'Asia/Famagusta': 'Turkey Standard Time', 'Asia/Gaza': 'West Bank Standard Time', + 'Asia/Harbin': 'China Standard Time', 'Asia/Hebron': 'West Bank Standard Time', 'Asia/Hong_Kong': 'China Standard Time', 'Asia/Hovd': 'W. 
Mongolia Standard Time', @@ -383,12 +399,14 @@ 'Asia/Kabul': 'Afghanistan Standard Time', 'Asia/Kamchatka': 'Russia Time Zone 11', 'Asia/Karachi': 'Pakistan Standard Time', + 'Asia/Kashgar': 'Central Asia Standard Time', 'Asia/Katmandu': 'Nepal Standard Time', 'Asia/Khandyga': 'Yakutsk Standard Time', 'Asia/Krasnoyarsk': 'North Asia Standard Time', 'Asia/Kuala_Lumpur': 'Singapore Standard Time', 'Asia/Kuching': 'Singapore Standard Time', 'Asia/Kuwait': 'Arab Standard Time', + 'Asia/Macao': 'China Standard Time', 'Asia/Macau': 'China Standard Time', 'Asia/Magadan': 'Magadan Standard Time', 'Asia/Makassar': 'Singapore Standard Time', @@ -417,10 +435,14 @@ 'Asia/Tashkent': 'West Asia Standard Time', 'Asia/Tbilisi': 'Georgian Standard Time', 'Asia/Tehran': 'Iran Standard Time', + 'Asia/Tel_Aviv': 'Israel Standard Time', + 'Asia/Thimbu': 'Bangladesh Standard Time', 'Asia/Thimphu': 'Bangladesh Standard Time', 'Asia/Tokyo': 'Tokyo Standard Time', 'Asia/Tomsk': 'Tomsk Standard Time', + 'Asia/Ujung_Pandang': 'Singapore Standard Time', 'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time', + 'Asia/Ulan_Bator': 'Ulaanbaatar Standard Time', 'Asia/Urumqi': 'Central Asia Standard Time', 'Asia/Ust-Nera': 'Vladivostok Standard Time', 'Asia/Vientiane': 'SE Asia Standard Time', @@ -433,25 +455,54 @@ 'Atlantic/Canary': 'GMT Standard Time', 'Atlantic/Cape_Verde': 'Cape Verde Standard Time', 'Atlantic/Faeroe': 'GMT Standard Time', + 'Atlantic/Jan_Mayen': 'W. Europe Standard Time', 'Atlantic/Madeira': 'GMT Standard Time', 'Atlantic/Reykjavik': 'Greenwich Standard Time', 'Atlantic/South_Georgia': 'UTC-02', 'Atlantic/St_Helena': 'Greenwich Standard Time', 'Atlantic/Stanley': 'SA Eastern Standard Time', + 'Australia/ACT': 'AUS Eastern Standard Time', 'Australia/Adelaide': 'Cen. Australia Standard Time', 'Australia/Brisbane': 'E. Australia Standard Time', 'Australia/Broken_Hill': 'Cen. Australia Standard Time', + 'Australia/Canberra': 'AUS Eastern Standard Time', 'Australia/Currie': 'Tasmania Standard Time', 'Australia/Darwin': 'AUS Central Standard Time', 'Australia/Eucla': 'Aus Central W. Standard Time', 'Australia/Hobart': 'Tasmania Standard Time', + 'Australia/LHI': 'Lord Howe Standard Time', 'Australia/Lindeman': 'E. Australia Standard Time', 'Australia/Lord_Howe': 'Lord Howe Standard Time', 'Australia/Melbourne': 'AUS Eastern Standard Time', + 'Australia/NSW': 'AUS Eastern Standard Time', + 'Australia/North': 'AUS Central Standard Time', 'Australia/Perth': 'W. Australia Standard Time', + 'Australia/Queensland': 'E. Australia Standard Time', + 'Australia/South': 'Cen. Australia Standard Time', 'Australia/Sydney': 'AUS Eastern Standard Time', + 'Australia/Tasmania': 'Tasmania Standard Time', + 'Australia/Victoria': 'AUS Eastern Standard Time', + 'Australia/West': 'W. Australia Standard Time', + 'Australia/Yancowinna': 'Cen. Australia Standard Time', + 'Brazil/Acre': 'SA Pacific Standard Time', + 'Brazil/DeNoronha': 'UTC-02', + 'Brazil/East': 'E. 
South America Standard Time', + 'Brazil/West': 'SA Western Standard Time', 'CST6CDT': 'Central Standard Time', + 'Canada/Atlantic': 'Atlantic Standard Time', + 'Canada/Central': 'Central Standard Time', + 'Canada/Eastern': 'Eastern Standard Time', + 'Canada/Mountain': 'Mountain Standard Time', + 'Canada/Newfoundland': 'Newfoundland Standard Time', + 'Canada/Pacific': 'Pacific Standard Time', + 'Canada/Saskatchewan': 'Canada Central Standard Time', + 'Canada/Yukon': 'Pacific Standard Time', + 'Chile/Continental': 'Pacific SA Standard Time', + 'Chile/EasterIsland': 'Easter Island Standard Time', + 'Cuba': 'Cuba Standard Time', 'EST5EDT': 'Eastern Standard Time', + 'Egypt': 'Egypt Standard Time', + 'Eire': 'GMT Standard Time', 'Etc/GMT': 'UTC', 'Etc/GMT+1': 'Cape Verde Standard Time', 'Etc/GMT+10': 'Hawaiian Standard Time', @@ -469,7 +520,7 @@ 'Etc/GMT-10': 'West Pacific Standard Time', 'Etc/GMT-11': 'Central Pacific Standard Time', 'Etc/GMT-12': 'UTC+12', - 'Etc/GMT-13': 'Tonga Standard Time', + 'Etc/GMT-13': 'UTC+13', 'Etc/GMT-14': 'Line Islands Standard Time', 'Etc/GMT-2': 'South Africa Standard Time', 'Etc/GMT-3': 'E. Africa Standard Time', @@ -484,6 +535,7 @@ 'Europe/Andorra': 'W. Europe Standard Time', 'Europe/Astrakhan': 'Astrakhan Standard Time', 'Europe/Athens': 'GTB Standard Time', + 'Europe/Belfast': 'GMT Standard Time', 'Europe/Belgrade': 'Central Europe Standard Time', 'Europe/Berlin': 'W. Europe Standard Time', 'Europe/Bratislava': 'Central Europe Standard Time', @@ -522,13 +574,14 @@ 'Europe/Samara': 'Russia Time Zone 3', 'Europe/San_Marino': 'W. Europe Standard Time', 'Europe/Sarajevo': 'Central European Standard Time', - 'Europe/Saratov': 'Astrakhan Standard Time', + 'Europe/Saratov': 'Saratov Standard Time', 'Europe/Simferopol': 'Russian Standard Time', 'Europe/Skopje': 'Central European Standard Time', 'Europe/Sofia': 'FLE Standard Time', 'Europe/Stockholm': 'W. Europe Standard Time', 'Europe/Tallinn': 'FLE Standard Time', 'Europe/Tirane': 'Central Europe Standard Time', + 'Europe/Tiraspol': 'E. Europe Standard Time', 'Europe/Ulyanovsk': 'Astrakhan Standard Time', 'Europe/Uzhgorod': 'FLE Standard Time', 'Europe/Vaduz': 'W. Europe Standard Time', @@ -540,6 +593,14 @@ 'Europe/Zagreb': 'Central European Standard Time', 'Europe/Zaporozhye': 'FLE Standard Time', 'Europe/Zurich': 'W. Europe Standard Time', + 'GB': 'GMT Standard Time', + 'GB-Eire': 'GMT Standard Time', + 'GMT+0': 'UTC', + 'GMT-0': 'UTC', + 'GMT0': 'UTC', + 'Greenwich': 'UTC', + 'Hongkong': 'China Standard Time', + 'Iceland': 'Greenwich Standard Time', 'Indian/Antananarivo': 'E. Africa Standard Time', 'Indian/Chagos': 'Central Asia Standard Time', 'Indian/Christmas': 'SE Asia Standard Time', @@ -551,7 +612,20 @@ 'Indian/Mauritius': 'Mauritius Standard Time', 'Indian/Mayotte': 'E. 
Africa Standard Time', 'Indian/Reunion': 'Mauritius Standard Time', + 'Iran': 'Iran Standard Time', + 'Israel': 'Israel Standard Time', + 'Jamaica': 'SA Pacific Standard Time', + 'Japan': 'Tokyo Standard Time', + 'Kwajalein': 'UTC+12', + 'Libya': 'Libya Standard Time', 'MST7MDT': 'Mountain Standard Time', + 'Mexico/BajaNorte': 'Pacific Standard Time (Mexico)', + 'Mexico/BajaSur': 'Mountain Standard Time (Mexico)', + 'Mexico/General': 'Central Standard Time (Mexico)', + 'NZ': 'New Zealand Standard Time', + 'NZ-CHAT': 'Chatham Islands Standard Time', + 'Navajo': 'Mountain Standard Time', + 'PRC': 'China Standard Time', 'PST8PDT': 'Pacific Standard Time', 'Pacific/Apia': 'Samoa Standard Time', 'Pacific/Auckland': 'New Zealand Standard Time', @@ -559,8 +633,8 @@ 'Pacific/Chatham': 'Chatham Islands Standard Time', 'Pacific/Easter': 'Easter Island Standard Time', 'Pacific/Efate': 'Central Pacific Standard Time', - 'Pacific/Enderbury': 'Tonga Standard Time', - 'Pacific/Fakaofo': 'Tonga Standard Time', + 'Pacific/Enderbury': 'UTC+13', + 'Pacific/Fakaofo': 'UTC+13', 'Pacific/Fiji': 'Fiji Standard Time', 'Pacific/Funafuti': 'UTC+12', 'Pacific/Galapagos': 'Central America Standard Time', @@ -586,9 +660,31 @@ 'Pacific/Port_Moresby': 'West Pacific Standard Time', 'Pacific/Rarotonga': 'Hawaiian Standard Time', 'Pacific/Saipan': 'West Pacific Standard Time', + 'Pacific/Samoa': 'UTC-11', 'Pacific/Tahiti': 'Hawaiian Standard Time', 'Pacific/Tarawa': 'UTC+12', 'Pacific/Tongatapu': 'Tonga Standard Time', 'Pacific/Truk': 'West Pacific Standard Time', 'Pacific/Wake': 'UTC+12', - 'Pacific/Wallis': 'UTC+12'} + 'Pacific/Wallis': 'UTC+12', + 'Poland': 'Central European Standard Time', + 'Portugal': 'GMT Standard Time', + 'ROC': 'Taipei Standard Time', + 'ROK': 'Korea Standard Time', + 'Singapore': 'Singapore Standard Time', + 'Turkey': 'Turkey Standard Time', + 'US/Alaska': 'Alaskan Standard Time', + 'US/Aleutian': 'Aleutian Standard Time', + 'US/Arizona': 'US Mountain Standard Time', + 'US/Central': 'Central Standard Time', + 'US/Eastern': 'Eastern Standard Time', + 'US/Hawaii': 'Hawaiian Standard Time', + 'US/Indiana-Starke': 'Central Standard Time', + 'US/Michigan': 'Eastern Standard Time', + 'US/Mountain': 'Mountain Standard Time', + 'US/Pacific': 'Pacific Standard Time', + 'US/Samoa': 'UTC-11', + 'UTC': 'UTC', + 'Universal': 'UTC', + 'W-SU': 'Russian Standard Time', + 'Zulu': 'UTC'} diff --git a/ext/wrapt/__init__.py b/ext/wrapt/__init__.py new file mode 100644 index 0000000000..182a1e1100 --- /dev/null +++ b/ext/wrapt/__init__.py @@ -0,0 +1,19 @@ +__version_info__ = ('1', '10', '11') +__version__ = '.'.join(__version_info__) + +from .wrappers import (ObjectProxy, CallableObjectProxy, FunctionWrapper, + BoundFunctionWrapper, WeakFunctionProxy, resolve_path, apply_patch, + wrap_object, wrap_object_attribute, function_wrapper, + wrap_function_wrapper, patch_function_wrapper, + transient_function_wrapper) + +from .decorators import (adapter_factory, AdapterFactory, decorator, + synchronized) + +from .importer import (register_post_import_hook, when_imported, + notify_module_loaded, discover_post_import_hooks) + +try: + from inspect import getcallargs +except ImportError: + from .arguments import getcallargs diff --git a/ext/wrapt/arguments.py b/ext/wrapt/arguments.py new file mode 100644 index 0000000000..428ffaed04 --- /dev/null +++ b/ext/wrapt/arguments.py @@ -0,0 +1,96 @@ +# This is a copy of the inspect.getcallargs() function from Python 2.7 +# so we can provide it for use under Python 2.6. 
As the code in this +# file derives from the Python distribution, it falls under the version +# of the PSF license used for Python 2.7. + +from inspect import getargspec, ismethod +import sys + +def getcallargs(func, *positional, **named): + """Get the mapping of arguments to values. + + A dict is returned, with keys the function argument names (including the + names of the * and ** arguments, if any), and values the respective bound + values from 'positional' and 'named'.""" + args, varargs, varkw, defaults = getargspec(func) + f_name = func.__name__ + arg2value = {} + + # The following closures are basically because of tuple parameter unpacking. + assigned_tuple_params = [] + def assign(arg, value): + if isinstance(arg, str): + arg2value[arg] = value + else: + assigned_tuple_params.append(arg) + value = iter(value) + for i, subarg in enumerate(arg): + try: + subvalue = next(value) + except StopIteration: + raise ValueError('need more than %d %s to unpack' % + (i, 'values' if i > 1 else 'value')) + assign(subarg, subvalue) + try: + next(value) + except StopIteration: + pass + else: + raise ValueError('too many values to unpack') + def is_assigned(arg): + if isinstance(arg, str): + return arg in arg2value + return arg in assigned_tuple_params + if ismethod(func) and func.im_self is not None: + # implicit 'self' (or 'cls' for classmethods) argument + positional = (func.im_self,) + positional + num_pos = len(positional) + num_total = num_pos + len(named) + num_args = len(args) + num_defaults = len(defaults) if defaults else 0 + for arg, value in zip(args, positional): + assign(arg, value) + if varargs: + if num_pos > num_args: + assign(varargs, positional[-(num_pos-num_args):]) + else: + assign(varargs, ()) + elif 0 < num_args < num_pos: + raise TypeError('%s() takes %s %d %s (%d given)' % ( + f_name, 'at most' if defaults else 'exactly', num_args, + 'arguments' if num_args > 1 else 'argument', num_total)) + elif num_args == 0 and num_total: + if varkw: + if num_pos: + # XXX: We should use num_pos, but Python also uses num_total: + raise TypeError('%s() takes exactly 0 arguments ' + '(%d given)' % (f_name, num_total)) + else: + raise TypeError('%s() takes no arguments (%d given)' % + (f_name, num_total)) + for arg in args: + if isinstance(arg, str) and arg in named: + if is_assigned(arg): + raise TypeError("%s() got multiple values for keyword " + "argument '%s'" % (f_name, arg)) + else: + assign(arg, named.pop(arg)) + if defaults: # fill in any missing values with the defaults + for arg, value in zip(args[-num_defaults:], defaults): + if not is_assigned(arg): + assign(arg, value) + if varkw: + assign(varkw, named) + elif named: + unexpected = next(iter(named)) + if isinstance(unexpected, unicode): + unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace') + raise TypeError("%s() got an unexpected keyword argument '%s'" % + (f_name, unexpected)) + unassigned = num_args - len([arg for arg in args if is_assigned(arg)]) + if unassigned: + num_required = num_args - num_defaults + raise TypeError('%s() takes %s %d %s (%d given)' % ( + f_name, 'at least' if defaults else 'exactly', num_required, + 'arguments' if num_required > 1 else 'argument', num_total)) + return arg2value diff --git a/ext/wrapt/decorators.py b/ext/wrapt/decorators.py new file mode 100644 index 0000000000..2ad3494617 --- /dev/null +++ b/ext/wrapt/decorators.py @@ -0,0 +1,512 @@ +"""This module implements decorators for implementing other decorators +as well as some commonly used decorators. 
+ +""" + +import sys + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + + import builtins + exec_ = getattr(builtins, "exec") + del builtins + +else: + string_types = basestring, + + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + +from functools import partial +from inspect import ismethod, isclass, formatargspec +from collections import namedtuple +from threading import Lock, RLock + +try: + from inspect import signature +except ImportError: + pass + +from .wrappers import (FunctionWrapper, BoundFunctionWrapper, ObjectProxy, + CallableObjectProxy) + +# Adapter wrapper for the wrapped function which will overlay certain +# properties from the adapter function onto the wrapped function so that +# functions such as inspect.getargspec(), inspect.getfullargspec(), +# inspect.signature() and inspect.getsource() return the correct results +# one would expect. + +class _AdapterFunctionCode(CallableObjectProxy): + + def __init__(self, wrapped_code, adapter_code): + super(_AdapterFunctionCode, self).__init__(wrapped_code) + self._self_adapter_code = adapter_code + + @property + def co_argcount(self): + return self._self_adapter_code.co_argcount + + @property + def co_code(self): + return self._self_adapter_code.co_code + + @property + def co_flags(self): + return self._self_adapter_code.co_flags + + @property + def co_kwonlyargcount(self): + return self._self_adapter_code.co_kwonlyargcount + + @property + def co_varnames(self): + return self._self_adapter_code.co_varnames + +class _AdapterFunctionSurrogate(CallableObjectProxy): + + def __init__(self, wrapped, adapter): + super(_AdapterFunctionSurrogate, self).__init__(wrapped) + self._self_adapter = adapter + + @property + def __code__(self): + return _AdapterFunctionCode(self.__wrapped__.__code__, + self._self_adapter.__code__) + + @property + def __defaults__(self): + return self._self_adapter.__defaults__ + + @property + def __kwdefaults__(self): + return self._self_adapter.__kwdefaults__ + + @property + def __signature__(self): + if 'signature' not in globals(): + return self._self_adapter.__signature__ + else: + # Can't allow this to fail on Python 3 else it falls + # through to using __wrapped__, but that will be the + # wrong function we want to derive the signature + # from. Thus generate the signature ourselves. 
+
+            return signature(self._self_adapter)
+
+    if PY2:
+        func_code = __code__
+        func_defaults = __defaults__
+
+class _BoundAdapterWrapper(BoundFunctionWrapper):
+
+    @property
+    def __func__(self):
+        return _AdapterFunctionSurrogate(self.__wrapped__.__func__,
+                self._self_parent._self_adapter)
+
+    if PY2:
+        im_func = __func__
+
+class AdapterWrapper(FunctionWrapper):
+
+    __bound_function_wrapper__ = _BoundAdapterWrapper
+
+    def __init__(self, *args, **kwargs):
+        adapter = kwargs.pop('adapter')
+        super(AdapterWrapper, self).__init__(*args, **kwargs)
+        self._self_surrogate = _AdapterFunctionSurrogate(
+                self.__wrapped__, adapter)
+        self._self_adapter = adapter
+
+    @property
+    def __code__(self):
+        return self._self_surrogate.__code__
+
+    @property
+    def __defaults__(self):
+        return self._self_surrogate.__defaults__
+
+    @property
+    def __kwdefaults__(self):
+        return self._self_surrogate.__kwdefaults__
+
+    if PY2:
+        func_code = __code__
+        func_defaults = __defaults__
+
+    @property
+    def __signature__(self):
+        return self._self_surrogate.__signature__
+
+class AdapterFactory(object):
+    def __call__(self, wrapped):
+        raise NotImplementedError()
+
+class DelegatedAdapterFactory(AdapterFactory):
+    def __init__(self, factory):
+        super(DelegatedAdapterFactory, self).__init__()
+        self.factory = factory
+    def __call__(self, wrapped):
+        return self.factory(wrapped)
+
+adapter_factory = DelegatedAdapterFactory
+
+# Decorator for creating other decorators. This decorator and the
+# wrappers which they use are designed to properly preserve any name
+# attributes, function signatures etc, in addition to the wrappers
+# themselves acting like a transparent proxy for the original wrapped
+# function so the wrapper is effectively indistinguishable from the
+# original wrapped function.
+
+def decorator(wrapper=None, enabled=None, adapter=None):
+    # The decorator should be supplied with a single positional argument
+    # which is the wrapper function to be used to implement the
+    # decorator. This may be preceded by a step whereby the keyword
+    # arguments are supplied to customise the behaviour of the
+    # decorator. The 'adapter' argument is used to optionally denote a
+    # separate function which is notionally used by an adapter
+    # decorator. In that case parts of the function '__code__' and
+    # '__defaults__' attributes are used from the adapter function
+    # rather than those of the wrapped function. This allows for the
+    # argument specification from inspect.getargspec() and similar
+    # functions to be overridden with a prototype for a different
+    # function than what was wrapped. The 'enabled' argument provides a
+    # way to enable/disable the use of the decorator. If the type of
+    # 'enabled' is a boolean, then it is evaluated immediately and the
+    # wrapper not even applied if it is False. If not a boolean, it will
+    # be evaluated when the wrapper is called for an unbound wrapper,
+    # and when binding occurs for a bound wrapper. When being evaluated,
+    # if 'enabled' is callable it will be called to obtain the value to
+    # be checked. If False, the wrapper will not be called and the
+    # original wrapped function will be called directly instead.
+
+    if wrapper is not None:
+        # Helper function for creating a wrapper of the appropriate
+        # type when we need it down below.
+ + def _build(wrapped, wrapper, enabled=None, adapter=None): + if adapter: + if isinstance(adapter, AdapterFactory): + adapter = adapter(wrapped) + + if not callable(adapter): + ns = {} + if not isinstance(adapter, string_types): + adapter = formatargspec(*adapter) + exec_('def adapter{0}: pass'.format(adapter), ns, ns) + adapter = ns['adapter'] + + return AdapterWrapper(wrapped=wrapped, wrapper=wrapper, + enabled=enabled, adapter=adapter) + + return FunctionWrapper(wrapped=wrapped, wrapper=wrapper, + enabled=enabled) + + # The wrapper has been provided so return the final decorator. + # The decorator is itself one of our function wrappers so we + # can determine when it is applied to functions, instance methods + # or class methods. This allows us to bind the instance or class + # method so the appropriate self or cls attribute is supplied + # when it is finally called. + + def _wrapper(wrapped, instance, args, kwargs): + # We first check for the case where the decorator was applied + # to a class type. + # + # @decorator + # class mydecoratorclass(object): + # def __init__(self, arg=None): + # self.arg = arg + # def __call__(self, wrapped, instance, args, kwargs): + # return wrapped(*args, **kwargs) + # + # @mydecoratorclass(arg=1) + # def function(): + # pass + # + # In this case an instance of the class is to be used as the + # decorator wrapper function. If args was empty at this point, + # then it means that there were optional keyword arguments + # supplied to be used when creating an instance of the class + # to be used as the wrapper function. + + if instance is None and isclass(wrapped) and not args: + # We still need to be passed the target function to be + # wrapped as yet, so we need to return a further function + # to be able to capture it. + + def _capture(target_wrapped): + # Now have the target function to be wrapped and need + # to create an instance of the class which is to act + # as the decorator wrapper function. Before we do that, + # we need to first check that use of the decorator + # hadn't been disabled by a simple boolean. If it was, + # the target function to be wrapped is returned instead. + + _enabled = enabled + if type(_enabled) is bool: + if not _enabled: + return target_wrapped + _enabled = None + + # Now create an instance of the class which is to act + # as the decorator wrapper function. Any arguments had + # to be supplied as keyword only arguments so that is + # all we pass when creating it. + + target_wrapper = wrapped(**kwargs) + + # Finally build the wrapper itself and return it. + + return _build(target_wrapped, target_wrapper, + _enabled, adapter) + + return _capture + + # We should always have the target function to be wrapped at + # this point as the first (and only) value in args. + + target_wrapped = args[0] + + # Need to now check that use of the decorator hadn't been + # disabled by a simple boolean. If it was, then target + # function to be wrapped is returned instead. + + _enabled = enabled + if type(_enabled) is bool: + if not _enabled: + return target_wrapped + _enabled = None + + # We now need to build the wrapper, but there are a couple of + # different cases we need to consider. + + if instance is None: + if isclass(wrapped): + # In this case the decorator was applied to a class + # type but optional keyword arguments were not supplied + # for initialising an instance of the class to be used + # as the decorator wrapper function. 
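# Stepping back from the internals for a moment, an illustrative sketch (not
# taken from the patch) of the canonical use of the decorator() factory being
# built here: the wrapper receives the wrapped callable, the bound instance
# (None for plain functions), and the call arguments, and works unchanged for
# functions, methods and classes.
import wrapt

@wrapt.decorator
def pass_through(wrapped, instance, args, kwargs):
    # A real wrapper would add caching, logging, retries, etc. here.
    return wrapped(*args, **kwargs)

@pass_through
def add(a, b):
    return a + b

assert add(1, 2) == 3
assert add.__name__ == 'add'  # metadata survives thanks to the proxying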
+                #
+                #     @decorator
+                #     class mydecoratorclass(object):
+                #         def __init__(self, arg=None):
+                #             self.arg = arg
+                #         def __call__(self, wrapped, instance,
+                #                 args, kwargs):
+                #             return wrapped(*args, **kwargs)
+                #
+                #     @mydecoratorclass
+                #     def function():
+                #         pass
+                #
+                # We still need to create an instance of the class to
+                # be used as the decorator wrapper function, but no
+                # arguments are passed.
+
+                target_wrapper = wrapped()
+
+            else:
+                # In this case the decorator was applied to a normal
+                # function, or possibly a static method of a class.
+                #
+                #     @decorator
+                #     def mydecoratorfunction(wrapped, instance,
+                #             args, kwargs):
+                #         return wrapped(*args, **kwargs)
+                #
+                #     @mydecoratorfunction
+                #     def function():
+                #         pass
+                #
+                # That normal function becomes the decorator wrapper
+                # function.
+
+                target_wrapper = wrapper
+
+        else:
+            if isclass(instance):
+                # In this case the decorator was applied to a class
+                # method.
+                #
+                #     class myclass(object):
+                #         @decorator
+                #         @classmethod
+                #         def decoratorclassmethod(cls, wrapped,
+                #                 instance, args, kwargs):
+                #             return wrapped(*args, **kwargs)
+                #
+                #     instance = myclass()
+                #
+                #     @instance.decoratorclassmethod
+                #     def function():
+                #         pass
+                #
+                # This one is a bit strange because binding was actually
+                # performed on the wrapper created by our decorator
+                # factory. We need to apply that binding to the decorator
+                # wrapper function which the decorator factory
+                # was applied to.
+
+                target_wrapper = wrapper.__get__(None, instance)
+
+            else:
+                # In this case the decorator was applied to an instance
+                # method.
+                #
+                #     class myclass(object):
+                #         @decorator
+                #         def decoratorclassmethod(self, wrapped,
+                #                 instance, args, kwargs):
+                #             return wrapped(*args, **kwargs)
+                #
+                #     instance = myclass()
+                #
+                #     @instance.decoratorclassmethod
+                #     def function():
+                #         pass
+                #
+                # This one is a bit strange because binding was actually
+                # performed on the wrapper created by our decorator
+                # factory. We need to apply that binding to the decorator
+                # wrapper function which the decorator factory
+                # was applied to.
+
+                target_wrapper = wrapper.__get__(instance, type(instance))
+
+        # Finally build the wrapper itself and return it.
+
+        return _build(target_wrapped, target_wrapper, _enabled, adapter)
+
+        # We first return our magic function wrapper here so we can
+        # determine in what context the decorator factory was used. In
+        # other words, it is itself a universal decorator.
+
+        return _build(wrapper, _wrapper)
+
+    else:
+        # The wrapper still has not been provided, so we are just
+        # collecting the optional keyword arguments. Return the
+        # decorator again wrapped in a partial using the collected
+        # arguments.
+
+        return partial(decorator, enabled=enabled, adapter=adapter)
+
+# Decorator for implementing thread synchronization. It can be used as a
+# decorator, in which case the synchronization context is determined by
+# what type of function is wrapped, or it can also be used as a context
+# manager, where the user needs to supply the correct synchronization
+# context. It is also possible to supply an object which appears to be a
+# synchronization primitive of some sort, by virtue of having release()
+# and acquire() methods. In that case that will be used directly as the
+# synchronization primitive without creating a separate lock against the
+# derived or supplied context.
+
+def synchronized(wrapped):
+    # Determine if being passed an object which is a synchronization
+    # primitive.
We can't check by type for Lock, RLock, Semaphore etc, + # as the means of creating them isn't the type. Therefore use the + # existence of acquire() and release() methods. This is more + # extensible anyway as it allows custom synchronization mechanisms. + + if hasattr(wrapped, 'acquire') and hasattr(wrapped, 'release'): + # We remember what the original lock is and then return a new + # decorator which accesses and locks it. When returning the new + # decorator we wrap it with an object proxy so we can override + # the context manager methods in case it is being used to wrap + # synchronized statements with a 'with' statement. + + lock = wrapped + + @decorator + def _synchronized(wrapped, instance, args, kwargs): + # Execute the wrapped function while the original supplied + # lock is held. + + with lock: + return wrapped(*args, **kwargs) + + class _PartialDecorator(CallableObjectProxy): + + def __enter__(self): + lock.acquire() + return lock + + def __exit__(self, *args): + lock.release() + + return _PartialDecorator(wrapped=_synchronized) + + # Following only apply when the lock is being created automatically + # based on the context of what was supplied. In this case we supply + # a final decorator, but need to use FunctionWrapper directly as we + # want to derive from it to add context manager methods in case it is + # being used to wrap synchronized statements with a 'with' statement. + + def _synchronized_lock(context): + # Attempt to retrieve the lock for the specific context. + + lock = vars(context).get('_synchronized_lock', None) + + if lock is None: + # There is no existing lock defined for the context we + # are dealing with so we need to create one. This needs + # to be done in a way to guarantee there is only one + # created, even if multiple threads try and create it at + # the same time. We can't always use the setdefault() + # method on the __dict__ for the context. This is the + # case where the context is a class, as __dict__ is + # actually a dictproxy. What we therefore do is use a + # meta lock on this wrapper itself, to control the + # creation and assignment of the lock attribute against + # the context. + + meta_lock = vars(synchronized).setdefault( + '_synchronized_meta_lock', Lock()) + + with meta_lock: + # We need to check again for whether the lock we want + # exists in case two threads were trying to create it + # at the same time and were competing to create the + # meta lock. + + lock = vars(context).get('_synchronized_lock', None) + + if lock is None: + lock = RLock() + setattr(context, '_synchronized_lock', lock) + + return lock + + def _synchronized_wrapper(wrapped, instance, args, kwargs): + # Execute the wrapped function while the lock for the + # desired context is held. If instance is None then the + # wrapped function is used as the context. + + with _synchronized_lock(instance or wrapped): + return wrapped(*args, **kwargs) + + class _FinalDecorator(FunctionWrapper): + + def __enter__(self): + self._self_lock = _synchronized_lock(self.__wrapped__) + self._self_lock.acquire() + return self._self_lock + + def __exit__(self, *args): + self._self_lock.release() + + return _FinalDecorator(wrapped=wrapped, wrapper=_synchronized_wrapper) diff --git a/ext/wrapt/importer.py b/ext/wrapt/importer.py new file mode 100644 index 0000000000..7fd2d8dfa6 --- /dev/null +++ b/ext/wrapt/importer.py @@ -0,0 +1,228 @@ +"""This module implements a post import hook mechanism styled after what is +described in PEP-369. Note that it doesn't cope with modules being reloaded. 
+
+"""
+
+import sys
+import threading
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    import importlib
+    string_types = str,
+else:
+    string_types = basestring,
+
+from .decorators import synchronized
+
+# The dictionary registering any post import hooks to be triggered once
+# the target module has been imported. Once a module has been imported
+# and the hooks fired, the list of hooks recorded against the target
+# module will be truncated but the list left in the dictionary. This
+# acts as a flag to indicate that the module had already been imported.
+
+_post_import_hooks = {}
+_post_import_hooks_init = False
+_post_import_hooks_lock = threading.RLock()
+
+# Register a new post import hook for the target module name. This
+# differs from the PEP-369 implementation in that it also allows the
+# hook function to be specified as a string consisting of the name of
+# the callback in the form 'module:function'. This will result in a
+# proxy callback being registered which will defer loading of the
+# specified module containing the callback function until required.
+
+def _create_import_hook_from_string(name):
+    def import_hook(module):
+        module_name, function = name.split(':')
+        attrs = function.split('.')
+        __import__(module_name)
+        callback = sys.modules[module_name]
+        for attr in attrs:
+            callback = getattr(callback, attr)
+        return callback(module)
+    return import_hook
+
+@synchronized(_post_import_hooks_lock)
+def register_post_import_hook(hook, name):
+    # Create a deferred import hook if hook is a string name rather than
+    # a callable function.
+
+    if isinstance(hook, string_types):
+        hook = _create_import_hook_from_string(hook)
+
+    # Automatically install the import hook finder if it has not already
+    # been installed.
+
+    global _post_import_hooks_init
+
+    if not _post_import_hooks_init:
+        _post_import_hooks_init = True
+        sys.meta_path.insert(0, ImportHookFinder())
+
+    # Determine if any prior registration of a post import hook for
+    # the target module has occurred and act appropriately.
+
+    hooks = _post_import_hooks.get(name, None)
+
+    if hooks is None:
+        # No prior registration of post import hooks for the target
+        # module. We need to check whether the module has already been
+        # imported. If it has we fire the hook immediately and add an
+        # empty list to the registry to indicate that the module has
+        # already been imported and hooks have fired. Otherwise add
+        # the post import hook to the registry.
+
+        module = sys.modules.get(name, None)
+
+        if module is not None:
+            _post_import_hooks[name] = []
+            hook(module)
+
+        else:
+            _post_import_hooks[name] = [hook]
+
+    elif hooks == []:
+        # A prior registration of post import hooks for the target
+        # module was done and the hooks already fired. Fire the hook
+        # immediately.
+
+        module = sys.modules[name]
+        hook(module)
+
+    else:
+        # A prior registration of post import hooks for the target
+        # module was done but the module has not yet been imported.
+
+        _post_import_hooks[name].append(hook)
+
+# Register post import hooks defined as package entry points.
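# An illustrative sketch (not taken from the patch) of the public entry
# points for the machinery above: when_imported(), defined at the bottom of
# this module, registers a hook that fires when the named module is first
# imported, or immediately if it has already been imported.
import wrapt

@wrapt.when_imported('json')
def _on_json_import(module):
    print('json imported: %s' % module.__name__)

import json  # by this point the hook has fired, exactly once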
+
+def _create_import_hook_from_entrypoint(entrypoint):
+    def import_hook(module):
+        __import__(entrypoint.module_name)
+        callback = sys.modules[entrypoint.module_name]
+        for attr in entrypoint.attrs:
+            callback = getattr(callback, attr)
+        return callback(module)
+    return import_hook
+
+def discover_post_import_hooks(group):
+    try:
+        import pkg_resources
+    except ImportError:
+        return
+
+    for entrypoint in pkg_resources.iter_entry_points(group=group):
+        callback = _create_import_hook_from_entrypoint(entrypoint)
+        register_post_import_hook(callback, entrypoint.name)
+
+# Indicate that a module has been loaded. Any post import hooks which
+# were registered against the target module will be invoked. If an
+# exception is raised in any of the post import hooks, that will cause
+# the import of the target module to fail.
+
+@synchronized(_post_import_hooks_lock)
+def notify_module_loaded(module):
+    name = getattr(module, '__name__', None)
+    hooks = _post_import_hooks.get(name, None)
+
+    if hooks:
+        _post_import_hooks[name] = []
+
+        for hook in hooks:
+            hook(module)
+
+# A custom module import finder. This intercepts attempts to import
+# modules and watches out for attempts to import target modules of
+# interest. When a module of interest is imported, then any post import
+# hooks which are registered will be invoked.
+
+class _ImportHookLoader:
+
+    def load_module(self, fullname):
+        module = sys.modules[fullname]
+        notify_module_loaded(module)
+
+        return module
+
+class _ImportHookChainedLoader:
+
+    def __init__(self, loader):
+        self.loader = loader
+
+    def load_module(self, fullname):
+        module = self.loader.load_module(fullname)
+        notify_module_loaded(module)
+
+        return module
+
+class ImportHookFinder:
+
+    def __init__(self):
+        self.in_progress = {}
+
+    @synchronized(_post_import_hooks_lock)
+    def find_module(self, fullname, path=None):
+        # If the module being imported is not one we have registered
+        # post import hooks for, we can return immediately. We will
+        # take no further part in the importing of this module.
+
+        if not fullname in _post_import_hooks:
+            return None
+
+        # When we are interested in a specific module, we will call back
+        # into the import system a second time to defer to the import
+        # finder that is supposed to handle the importing of the module.
+        # We set an in progress flag for the target module so that on
+        # the second time through we don't trigger another call back
+        # into the import system and cause an infinite loop.
+
+        if fullname in self.in_progress:
+            return None
+
+        self.in_progress[fullname] = True
+
+        # Now call back into the import system again.
+
+        try:
+            if PY3:
+                # For Python 3 we need to use find_loader() from
+                # the importlib module. It doesn't actually
+                # import the target module and only finds the
+                # loader. If a loader is found, we need to return
+                # our own loader which will then in turn call the
+                # real loader to import the module and invoke the
+                # post import hooks.
+
+                loader = importlib.find_loader(fullname, path)
+
+                if loader:
+                    return _ImportHookChainedLoader(loader)
+
+            else:
+                # For Python 2 we don't have much choice but to
+                # call back into __import__(). This will
+                # actually cause the module to be imported. If no
+                # module could be found then ImportError will be
+                # raised. Otherwise we return a loader which
+                # returns the already loaded module and invokes
+                # the post import hooks.
+
+                __import__(fullname)
+
+                return _ImportHookLoader()
+
+        finally:
+            del self.in_progress[fullname]
+
+# Decorator for marking that a function should be called as a post
+# import hook when the target module is imported.
+
+def when_imported(name):
+    def register(hook):
+        register_post_import_hook(hook, name)
+        return hook
+    return register
diff --git a/ext/wrapt/wrappers.py b/ext/wrapt/wrappers.py
new file mode 100644
index 0000000000..2125305d12
--- /dev/null
+++ b/ext/wrapt/wrappers.py
@@ -0,0 +1,901 @@
+import os
+import sys
+import functools
+import operator
+import weakref
+import inspect
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+else:
+    string_types = basestring,
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", bases, {})
+
+class _ObjectProxyMethods(object):
+
+    # We use properties to override the values of __module__ and
+    # __doc__. If we add these in ObjectProxy, the derived class
+    # __dict__ will still be setup to have string variants of these
+    # attributes and the rules of descriptors means that they appear to
+    # take precedence over the properties in the base class. To avoid
+    # that, we copy the properties into the derived class type itself
+    # via a meta class. In that way the properties will always take
+    # precedence.
+
+    @property
+    def __module__(self):
+        return self.__wrapped__.__module__
+
+    @__module__.setter
+    def __module__(self, value):
+        self.__wrapped__.__module__ = value
+
+    @property
+    def __doc__(self):
+        return self.__wrapped__.__doc__
+
+    @__doc__.setter
+    def __doc__(self, value):
+        self.__wrapped__.__doc__ = value
+
+    # We similarly use a property for __dict__. We need __dict__ to be
+    # explicit to ensure that vars() works as expected.
+
+    @property
+    def __dict__(self):
+        return self.__wrapped__.__dict__
+
+    # Need to also propagate the special __weakref__ attribute for the
+    # case where we decorate classes, which will define this. If we do
+    # not define it and use a function like inspect.getmembers() on a
+    # decorator class it will fail. This can't be in the derived classes.
+
+    @property
+    def __weakref__(self):
+        return self.__wrapped__.__weakref__
+
+class _ObjectProxyMetaType(type):
+    def __new__(cls, name, bases, dictionary):
+        # Copy our special properties into the class so that they
+        # always take precedence over attributes of the same name added
+        # during construction of a derived class. This is to save
+        # duplicating the implementation for them in all derived classes.
+
+        dictionary.update(vars(_ObjectProxyMethods))
+
+        return type.__new__(cls, name, bases, dictionary)
+
+class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
+
+    __slots__ = '__wrapped__'
+
+    def __init__(self, wrapped):
+        object.__setattr__(self, '__wrapped__', wrapped)
+
+        # Python 3.2+ has the __qualname__ attribute, but it does not
+        # allow it to be overridden using a property and it must instead
+        # be an actual string object.
+
+        try:
+            object.__setattr__(self, '__qualname__', wrapped.__qualname__)
+        except AttributeError:
+            pass
+
+    @property
+    def __name__(self):
+        return self.__wrapped__.__name__
+
+    @__name__.setter
+    def __name__(self, value):
+        self.__wrapped__.__name__ = value
+
+    @property
+    def __class__(self):
+        return self.__wrapped__.__class__
+
+    @__class__.setter
+    def __class__(self, value):
+        self.__wrapped__.__class__ = value
+
+    @property
+    def __annotations__(self):
+        return self.__wrapped__.__annotations__
+
+    @__annotations__.setter
+    def __annotations__(self, value):
+        self.__wrapped__.__annotations__ = value
+
+    def __dir__(self):
+        return dir(self.__wrapped__)
+
+    def __str__(self):
+        return str(self.__wrapped__)
+
+    if PY3:
+        def __bytes__(self):
+            return bytes(self.__wrapped__)
+
+    def __repr__(self):
+        return '<%s at 0x%x for %s at 0x%x>' % (
+                type(self).__name__, id(self),
+                type(self.__wrapped__).__name__,
+                id(self.__wrapped__))
+
+    def __reversed__(self):
+        return reversed(self.__wrapped__)
+
+    if PY3:
+        def __round__(self):
+            return round(self.__wrapped__)
+
+    def __lt__(self, other):
+        return self.__wrapped__ < other
+
+    def __le__(self, other):
+        return self.__wrapped__ <= other
+
+    def __eq__(self, other):
+        return self.__wrapped__ == other
+
+    def __ne__(self, other):
+        return self.__wrapped__ != other
+
+    def __gt__(self, other):
+        return self.__wrapped__ > other
+
+    def __ge__(self, other):
+        return self.__wrapped__ >= other
+
+    def __hash__(self):
+        return hash(self.__wrapped__)
+
+    def __nonzero__(self):
+        return bool(self.__wrapped__)
+
+    def __bool__(self):
+        return bool(self.__wrapped__)
+
+    def __setattr__(self, name, value):
+        if name.startswith('_self_'):
+            object.__setattr__(self, name, value)
+
+        elif name == '__wrapped__':
+            object.__setattr__(self, name, value)
+            try:
+                object.__delattr__(self, '__qualname__')
+            except AttributeError:
+                pass
+            try:
+                object.__setattr__(self, '__qualname__', value.__qualname__)
+            except AttributeError:
+                pass
+
+        elif name == '__qualname__':
+            setattr(self.__wrapped__, name, value)
+            object.__setattr__(self, name, value)
+
+        elif hasattr(type(self), name):
+            object.__setattr__(self, name, value)
+
+        else:
+            setattr(self.__wrapped__, name, value)
+
+    def __getattr__(self, name):
+        # If we are being asked to look up '__wrapped__' then the
+        # '__init__()' method cannot have been called.
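The net effect of all this delegation is that a proxy is very hard to tell apart from the object it wraps. A small illustration (the wrapped list is an arbitrary example):

    from wrapt import ObjectProxy

    value = ObjectProxy([1, 2, 3])
    assert value == [1, 2, 3]        # comparison operators are delegated
    assert value.__class__ is list   # the __class__ property reports the wrapped type
    assert isinstance(value, list)   # so isinstance() checks pass as well
    value.append(4)                  # unknown attributes fall through __getattr__

Only type(value) still reveals the proxy, since type() bypasses the __class__ property.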
+ + if name == '__wrapped__': + raise ValueError('wrapper has not been initialised') + + return getattr(self.__wrapped__, name) + + def __delattr__(self, name): + if name.startswith('_self_'): + object.__delattr__(self, name) + + elif name == '__wrapped__': + raise TypeError('__wrapped__ must be an object') + + elif name == '__qualname__': + object.__delattr__(self, name) + delattr(self.__wrapped__, name) + + elif hasattr(type(self), name): + object.__delattr__(self, name) + + else: + delattr(self.__wrapped__, name) + + def __add__(self, other): + return self.__wrapped__ + other + + def __sub__(self, other): + return self.__wrapped__ - other + + def __mul__(self, other): + return self.__wrapped__ * other + + def __div__(self, other): + return operator.div(self.__wrapped__, other) + + def __truediv__(self, other): + return operator.truediv(self.__wrapped__, other) + + def __floordiv__(self, other): + return self.__wrapped__ // other + + def __mod__(self, other): + return self.__wrapped__ % other + + def __divmod__(self, other): + return divmod(self.__wrapped__, other) + + def __pow__(self, other, *args): + return pow(self.__wrapped__, other, *args) + + def __lshift__(self, other): + return self.__wrapped__ << other + + def __rshift__(self, other): + return self.__wrapped__ >> other + + def __and__(self, other): + return self.__wrapped__ & other + + def __xor__(self, other): + return self.__wrapped__ ^ other + + def __or__(self, other): + return self.__wrapped__ | other + + def __radd__(self, other): + return other + self.__wrapped__ + + def __rsub__(self, other): + return other - self.__wrapped__ + + def __rmul__(self, other): + return other * self.__wrapped__ + + def __rdiv__(self, other): + return operator.div(other, self.__wrapped__) + + def __rtruediv__(self, other): + return operator.truediv(other, self.__wrapped__) + + def __rfloordiv__(self, other): + return other // self.__wrapped__ + + def __rmod__(self, other): + return other % self.__wrapped__ + + def __rdivmod__(self, other): + return divmod(other, self.__wrapped__) + + def __rpow__(self, other, *args): + return pow(other, self.__wrapped__, *args) + + def __rlshift__(self, other): + return other << self.__wrapped__ + + def __rrshift__(self, other): + return other >> self.__wrapped__ + + def __rand__(self, other): + return other & self.__wrapped__ + + def __rxor__(self, other): + return other ^ self.__wrapped__ + + def __ror__(self, other): + return other | self.__wrapped__ + + def __iadd__(self, other): + self.__wrapped__ += other + return self + + def __isub__(self, other): + self.__wrapped__ -= other + return self + + def __imul__(self, other): + self.__wrapped__ *= other + return self + + def __idiv__(self, other): + self.__wrapped__ = operator.idiv(self.__wrapped__, other) + return self + + def __itruediv__(self, other): + self.__wrapped__ = operator.itruediv(self.__wrapped__, other) + return self + + def __ifloordiv__(self, other): + self.__wrapped__ //= other + return self + + def __imod__(self, other): + self.__wrapped__ %= other + return self + + def __ipow__(self, other): + self.__wrapped__ **= other + return self + + def __ilshift__(self, other): + self.__wrapped__ <<= other + return self + + def __irshift__(self, other): + self.__wrapped__ >>= other + return self + + def __iand__(self, other): + self.__wrapped__ &= other + return self + + def __ixor__(self, other): + self.__wrapped__ ^= other + return self + + def __ior__(self, other): + self.__wrapped__ |= other + return self + + def __neg__(self): + return 
-self.__wrapped__
+
+    def __pos__(self):
+        return +self.__wrapped__
+
+    def __abs__(self):
+        return abs(self.__wrapped__)
+
+    def __invert__(self):
+        return ~self.__wrapped__
+
+    def __int__(self):
+        return int(self.__wrapped__)
+
+    def __long__(self):
+        return long(self.__wrapped__)
+
+    def __float__(self):
+        return float(self.__wrapped__)
+
+    def __oct__(self):
+        return oct(self.__wrapped__)
+
+    def __hex__(self):
+        return hex(self.__wrapped__)
+
+    def __index__(self):
+        return operator.index(self.__wrapped__)
+
+    def __len__(self):
+        return len(self.__wrapped__)
+
+    def __contains__(self, value):
+        return value in self.__wrapped__
+
+    def __getitem__(self, key):
+        return self.__wrapped__[key]
+
+    def __setitem__(self, key, value):
+        self.__wrapped__[key] = value
+
+    def __delitem__(self, key):
+        del self.__wrapped__[key]
+
+    def __getslice__(self, i, j):
+        return self.__wrapped__[i:j]
+
+    def __setslice__(self, i, j, value):
+        self.__wrapped__[i:j] = value
+
+    def __delslice__(self, i, j):
+        del self.__wrapped__[i:j]
+
+    def __enter__(self):
+        return self.__wrapped__.__enter__()
+
+    def __exit__(self, *args, **kwargs):
+        return self.__wrapped__.__exit__(*args, **kwargs)
+
+    def __iter__(self):
+        return iter(self.__wrapped__)
+
+class CallableObjectProxy(ObjectProxy):
+
+    def __call__(self, *args, **kwargs):
+        return self.__wrapped__(*args, **kwargs)
+
+class _FunctionWrapperBase(ObjectProxy):
+
+    __slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
+            '_self_binding', '_self_parent')
+
+    def __init__(self, wrapped, instance, wrapper, enabled=None,
+            binding='function', parent=None):
+
+        super(_FunctionWrapperBase, self).__init__(wrapped)
+
+        object.__setattr__(self, '_self_instance', instance)
+        object.__setattr__(self, '_self_wrapper', wrapper)
+        object.__setattr__(self, '_self_enabled', enabled)
+        object.__setattr__(self, '_self_binding', binding)
+        object.__setattr__(self, '_self_parent', parent)
+
+    def __get__(self, instance, owner):
+        # This method is actually doing double duty for both unbound and
+        # bound derived wrapper classes. It should possibly be broken up
+        # and the distinct functionality moved into the derived classes.
+        # Can't do that straight away due to some legacy code which is
+        # relying on it being here in this base class.
+        #
+        # The distinguishing attribute which determines whether we are
+        # being called in an unbound or bound wrapper is the parent
+        # attribute. If binding has never occurred, then the parent will
+        # be None.
+        #
+        # The first case, therefore, is when we are called in an unbound
+        # wrapper. In this case we perform the binding.
+        #
+        # We have one special case to worry about here. This is where we
+        # are decorating a nested class. In this case the wrapped class
+        # would not have a __get__() method to call. In that case we
+        # simply return self.
+        #
+        # Note that we otherwise still do binding even if instance is
+        # None and accessing an unbound instance method from a class.
+        # This is because we need to be able to later detect that
+        # specific case as we will need to extract the instance from the
+        # first argument of those passed in.
+
+        if self._self_parent is None:
+            if not inspect.isclass(self.__wrapped__):
+                descriptor = self.__wrapped__.__get__(instance, owner)
+
+                return self.__bound_function_wrapper__(descriptor, instance,
+                        self._self_wrapper, self._self_enabled,
+                        self._self_binding, self)
+
+            return self
+
+        # Now we have the case of binding occurring a second time on what
+        # was already a bound function. In this case we would usually
+        # return ourselves again. This mirrors what Python does.
+        #
+        # The special case this time is where we were originally bound
+        # with an instance of None and we were likely an instance
+        # method. In that case we rebind against the original wrapped
+        # function from the parent again.
+
+        if self._self_instance is None and self._self_binding == 'function':
+            descriptor = self._self_parent.__wrapped__.__get__(
+                    instance, owner)
+
+            return self._self_parent.__bound_function_wrapper__(
+                    descriptor, instance, self._self_wrapper,
+                    self._self_enabled, self._self_binding,
+                    self._self_parent)
+
+        return self
+
+    def __call__(self, *args, **kwargs):
+        # If enabled has been specified, then evaluate it at this point
+        # and if the wrapper is not to be executed, then simply return
+        # the bound function rather than a bound wrapper for the bound
+        # function. When evaluating enabled, if it is callable we call
+        # it, otherwise we evaluate it as a boolean.
+
+        if self._self_enabled is not None:
+            if callable(self._self_enabled):
+                if not self._self_enabled():
+                    return self.__wrapped__(*args, **kwargs)
+            elif not self._self_enabled:
+                return self.__wrapped__(*args, **kwargs)
+
+        # This can occur where the initial function wrapper was applied to
+        # a function that was already bound to an instance. In that case
+        # we want to extract the instance from the function and use it.
+
+        if self._self_binding == 'function':
+            if self._self_instance is None:
+                instance = getattr(self.__wrapped__, '__self__', None)
+                if instance is not None:
+                    return self._self_wrapper(self.__wrapped__, instance,
+                            args, kwargs)
+
+        # This is generally invoked when the wrapped function is being
+        # called as a normal function and is not bound to a class as an
+        # instance method. This is also invoked in the case where the
+        # wrapped function was a method, but this wrapper was in turn
+        # wrapped using the staticmethod decorator.
+
+        return self._self_wrapper(self.__wrapped__, self._self_instance,
+                args, kwargs)
+
+class BoundFunctionWrapper(_FunctionWrapperBase):
+
+    def __call__(self, *args, **kwargs):
+        # If enabled has been specified, then evaluate it at this point
+        # and if the wrapper is not to be executed, then simply return
+        # the bound function rather than a bound wrapper for the bound
+        # function. When evaluating enabled, if it is callable we call
+        # it, otherwise we evaluate it as a boolean.
+
+        if self._self_enabled is not None:
+            if callable(self._self_enabled):
+                if not self._self_enabled():
+                    return self.__wrapped__(*args, **kwargs)
+            elif not self._self_enabled:
+                return self.__wrapped__(*args, **kwargs)
+
+        # We need to do things differently depending on whether we are
+        # likely wrapping an instance method vs a static method or class
+        # method.
+
+        if self._self_binding == 'function':
+            if self._self_instance is None:
+                # This situation can occur where someone is calling the
+                # instancemethod via the class type and passing the instance
+                # as the first argument. We need to shift the args before
+                # making the call to the wrapper and effectively bind the
+                # instance to the wrapped function using a partial so the
+                # wrapper doesn't see anything as being different.
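The argument shifting described above is easiest to see with FunctionWrapper applied by hand (the Greeter class and the audit wrapper are invented for the illustration):

    from wrapt import FunctionWrapper

    calls = []

    def audit(wrapped, instance, args, kwargs):
        calls.append(instance)
        return wrapped(*args, **kwargs)

    class Greeter(object):
        def hello(self):
            return 'hi'

    Greeter.hello = FunctionWrapper(Greeter.hello, audit)

    g = Greeter()
    g.hello()         # bound call: the wrapper sees instance=g directly
    Greeter.hello(g)  # call via the class: g is shifted out of args
    assert calls == [g, g]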
+
+                if not args:
+                    raise TypeError('missing 1 required positional argument')
+
+                instance, args = args[0], args[1:]
+                wrapped = functools.partial(self.__wrapped__, instance)
+                return self._self_wrapper(wrapped, instance, args, kwargs)
+
+            return self._self_wrapper(self.__wrapped__, self._self_instance,
+                    args, kwargs)
+
+        else:
+            # Since in this case we are dealing with a classmethod or
+            # staticmethod, _self_instance will only tell us whether the
+            # classmethod or staticmethod was called via an instance of
+            # the class it is bound to, and not the case where it was
+            # called via the class type itself. We thus ignore
+            # _self_instance and use the __self__ attribute of the bound
+            # function instead. For a classmethod, this means instance
+            # will be the class type and for a staticmethod it will be
+            # None. This is probably the more useful thing we can pass
+            # through even though we lose knowledge of whether they were
+            # called on the instance vs the class type, as it reflects
+            # what they have available in the decorated function.
+
+            instance = getattr(self.__wrapped__, '__self__', None)
+
+            return self._self_wrapper(self.__wrapped__, instance, args,
+                    kwargs)
+
+class FunctionWrapper(_FunctionWrapperBase):
+
+    __bound_function_wrapper__ = BoundFunctionWrapper
+
+    def __init__(self, wrapped, wrapper, enabled=None):
+        # What it is we are wrapping here could be anything. We need to
+        # try and detect specific cases though. In particular, we need
+        # to detect when we are given something that is a method of a
+        # class. Further, we need to know when it is likely an instance
+        # method, as opposed to a class or static method. This can
+        # become problematic though as there isn't strictly a foolproof
+        # method of knowing.
+        #
+        # The situations we could encounter when wrapping a method are:
+        #
+        # 1. The wrapper is being applied as part of a decorator which
+        # is a part of the class definition. In this case what we are
+        # given is the raw unbound function, classmethod or staticmethod
+        # wrapper objects.
+        #
+        # The problem here is that we will not know we are being applied
+        # in the context of the class being set up. This becomes
+        # important later for the case of an instance method, because in
+        # that case we just see it as a raw function and can't
+        # distinguish it from wrapping a normal function outside of
+        # a class context.
+        #
+        # 2. The wrapper is being applied when performing monkey
+        # patching of the class type afterwards and the method to be
+        # wrapped was retrieved direct from the __dict__ of the class
+        # type. This is effectively the same as (1) above.
+        #
+        # 3. The wrapper is being applied when performing monkey
+        # patching of the class type afterwards and the method to be
+        # wrapped was retrieved from the class type. In this case
+        # binding will have been performed where the instance against
+        # which the method is bound will be None at that point.
+        #
+        # This case is a problem because we can no longer tell if the
+        # method was a static method, plus if using Python3, we cannot
+        # tell if it was an instance method as the concept of an
+        # unbound method no longer exists.
+        #
+        # 4. The wrapper is being applied when performing monkey
+        # patching of an instance of a class. In this case binding will
+        # have been performed where the instance was not None.
+        #
+        # This case is a problem because we can no longer tell if the
+        # method was a static method.
+        #
+        # Overall, the best we can do is look at the original type of the
+        # object which was wrapped prior to any binding being done and
+        # see if it is an instance of classmethod or staticmethod. In
+        # the case where other decorators are between us and them, if
+        # they do not propagate the __class__ attribute so that the
+        # isinstance() check works, then likely this will do the wrong
+        # thing where classmethod and staticmethod are used.
+        #
+        # Since it is likely to be very rare that anyone even puts
+        # decorators around classmethod and staticmethod, the likelihood
+        # of that being an issue is very small, so we accept it and
+        # suggest that those other decorators be fixed. It is also only
+        # an issue if a decorator wants to actually do things with the
+        # arguments.
+        #
+        # As to not being able to identify static methods properly, we
+        # just hope that that isn't something people are going to want
+        # to wrap, or if they do, suggest they do it the correct way by
+        # ensuring that it is decorated in the class definition itself,
+        # or patch it in the __dict__ of the class type.
+        #
+        # So to get the best outcome we can, whenever we aren't sure what
+        # it is, we label it as a 'function'. If it was already bound and
+        # is rebound later, we assume that it will be an instance method
+        # and try to cope with the possibility that the 'self' argument
+        # is being passed as an explicit argument, shuffling the
+        # arguments around to extract 'self' for use as the instance.
+
+        if isinstance(wrapped, classmethod):
+            binding = 'classmethod'
+
+        elif isinstance(wrapped, staticmethod):
+            binding = 'staticmethod'
+
+        elif hasattr(wrapped, '__self__'):
+            if inspect.isclass(wrapped.__self__):
+                binding = 'classmethod'
+            else:
+                binding = 'function'
+
+        else:
+            binding = 'function'
+
+        super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
+                enabled, binding)
+
+try:
+    if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
+        from ._wrappers import (ObjectProxy, CallableObjectProxy,
+            FunctionWrapper, BoundFunctionWrapper, _FunctionWrapperBase)
+except ImportError:
+    pass
+
+# Helper functions for applying wrappers to existing functions.
+
+def resolve_path(module, name):
+    if isinstance(module, string_types):
+        __import__(module)
+        module = sys.modules[module]
+
+    parent = module
+
+    path = name.split('.')
+    attribute = path[0]
+
+    original = getattr(parent, attribute)
+    for attribute in path[1:]:
+        parent = original
+
+        # We can't just always use getattr() because in doing
+        # that on a class it will cause binding to occur which
+        # will complicate things later and cause some things not
+        # to work. For the case of a class we therefore access
+        # the __dict__ directly. To cope though with the wrong
+        # class being given to us, or a method being moved into
+        # a base class, we need to walk the class hierarchy to
+        # work out exactly which __dict__ the method was defined
+        # in, as accessing it from __dict__ will fail if it was
+        # not actually on the class given. Fall back to using
+        # getattr() if we can't find it. If it truly doesn't
+        # exist, then that will fail.
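In practice these helpers are usually reached through wrap_function_wrapper(), defined a little further down. A hedged sketch of patching by dotted name ('mypackage.client' and 'Session.fetch' are invented targets for the example):

    import wrapt

    def log_call(wrapped, instance, args, kwargs):
        print('calling %s' % wrapped.__name__)
        return wrapped(*args, **kwargs)

    # Imports mypackage.client if necessary, walks the dotted attribute
    # path with resolve_path() (including the class __dict__ walk above),
    # and swaps a FunctionWrapper into place.
    wrapt.wrap_function_wrapper('mypackage.client', 'Session.fetch', log_call)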
+ + if inspect.isclass(original): + for cls in inspect.getmro(original): + if attribute in vars(cls): + original = vars(cls)[attribute] + break + else: + original = getattr(original, attribute) + + else: + original = getattr(original, attribute) + + return (parent, attribute, original) + +def apply_patch(parent, attribute, replacement): + setattr(parent, attribute, replacement) + +def wrap_object(module, name, factory, args=(), kwargs={}): + (parent, attribute, original) = resolve_path(module, name) + wrapper = factory(original, *args, **kwargs) + apply_patch(parent, attribute, wrapper) + return wrapper + +# Function for applying a proxy object to an attribute of a class +# instance. The wrapper works by defining an attribute of the same name +# on the class which is a descriptor and which intercepts access to the +# instance attribute. Note that this cannot be used on attributes which +# are themselves defined by a property object. + +class AttributeWrapper(object): + + def __init__(self, attribute, factory, args, kwargs): + self.attribute = attribute + self.factory = factory + self.args = args + self.kwargs = kwargs + + def __get__(self, instance, owner): + value = instance.__dict__[self.attribute] + return self.factory(value, *self.args, **self.kwargs) + + def __set__(self, instance, value): + instance.__dict__[self.attribute] = value + + def __delete__(self, instance): + del instance.__dict__[self.attribute] + +def wrap_object_attribute(module, name, factory, args=(), kwargs={}): + path, attribute = name.rsplit('.', 1) + parent = resolve_path(module, path)[2] + wrapper = AttributeWrapper(attribute, factory, args, kwargs) + apply_patch(parent, attribute, wrapper) + return wrapper + +# Functions for creating a simple decorator using a FunctionWrapper, +# plus short cut functions for applying wrappers to functions. These are +# for use when doing monkey patching. For a more featured way of +# creating decorators see the decorator decorator instead. + +def function_wrapper(wrapper): + def _wrapper(wrapped, instance, args, kwargs): + target_wrapped = args[0] + if instance is None: + target_wrapper = wrapper + elif inspect.isclass(instance): + target_wrapper = wrapper.__get__(None, instance) + else: + target_wrapper = wrapper.__get__(instance, type(instance)) + return FunctionWrapper(target_wrapped, target_wrapper) + return FunctionWrapper(wrapper, _wrapper) + +def wrap_function_wrapper(module, name, wrapper): + return wrap_object(module, name, FunctionWrapper, (wrapper,)) + +def patch_function_wrapper(module, name): + def _wrapper(wrapper): + return wrap_object(module, name, FunctionWrapper, (wrapper,)) + return _wrapper + +def transient_function_wrapper(module, name): + def _decorator(wrapper): + def _wrapper(wrapped, instance, args, kwargs): + target_wrapped = args[0] + if instance is None: + target_wrapper = wrapper + elif inspect.isclass(instance): + target_wrapper = wrapper.__get__(None, instance) + else: + target_wrapper = wrapper.__get__(instance, type(instance)) + def _execute(wrapped, instance, args, kwargs): + (parent, attribute, original) = resolve_path(module, name) + replacement = FunctionWrapper(original, target_wrapper) + setattr(parent, attribute, replacement) + try: + return wrapped(*args, **kwargs) + finally: + setattr(parent, attribute, original) + return FunctionWrapper(target_wrapped, _execute) + return FunctionWrapper(wrapper, _wrapper) + return _decorator + +# A weak function proxy. 
This will work on instance methods, class
+# methods, static methods and regular functions. Special treatment is
+# needed for the method types because the bound method is effectively a
+# transient object and applying a weak reference to one will immediately
+# result in it being destroyed and the weakref callback called. The weak
+# reference is therefore applied to the instance the method is bound to
+# and the original function. The function is then rebound at the point
+# of a call via the weak function proxy.
+
+def _weak_function_proxy_callback(ref, proxy, callback):
+    if proxy._self_expired:
+        return
+
+    proxy._self_expired = True
+
+    # This could raise an exception. We let it propagate back and let
+    # the weakref.proxy() deal with it, at which point it generally
+    # prints out a short error message directly to stderr and keeps going.
+
+    if callback is not None:
+        callback(proxy)
+
+class WeakFunctionProxy(ObjectProxy):
+
+    __slots__ = ('_self_expired', '_self_instance')
+
+    def __init__(self, wrapped, callback=None):
+        # We need to determine if the wrapped function is actually a
+        # bound method. In the case of a bound method, we need to keep a
+        # reference to the original unbound function and the instance.
+        # This is necessary because if we hold a reference to the bound
+        # function, it will be the only reference and given it is a
+        # temporary object, it will almost immediately expire and the
+        # weakref callback will be triggered. So what is done is that we
+        # hold a reference to the instance and unbound function and
+        # when called bind the function to the instance once again and
+        # then call it. Note that we avoid using a nested function for
+        # the callback here so as not to cause any odd reference cycles.
+
+        _callback = callback and functools.partial(
+                _weak_function_proxy_callback, proxy=self,
+                callback=callback)
+
+        self._self_expired = False
+
+        if isinstance(wrapped, _FunctionWrapperBase):
+            self._self_instance = weakref.ref(wrapped._self_instance,
+                    _callback)
+
+            if wrapped._self_parent is not None:
+                super(WeakFunctionProxy, self).__init__(
+                        weakref.proxy(wrapped._self_parent, _callback))
+
+            else:
+                super(WeakFunctionProxy, self).__init__(
+                        weakref.proxy(wrapped, _callback))
+
+            return
+
+        try:
+            self._self_instance = weakref.ref(wrapped.__self__, _callback)
+
+            super(WeakFunctionProxy, self).__init__(
+                    weakref.proxy(wrapped.__func__, _callback))
+
+        except AttributeError:
+            self._self_instance = None
+
+            super(WeakFunctionProxy, self).__init__(
+                    weakref.proxy(wrapped, _callback))
+
+    def __call__(self, *args, **kwargs):
+        # We perform a boolean check here on the instance and wrapped
+        # function as that will trigger the reference error prior to
+        # calling if the reference had expired.
+
+        instance = self._self_instance and self._self_instance()
+        function = self.__wrapped__ and self.__wrapped__
+
+        # If the wrapped function was originally a bound function, for
+        # which we retained a reference to the instance and the unbound
+        # function, we need to rebind the function and then call it. If
+        # not, we just call the wrapped function directly.
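The rebinding described above can be exercised directly (the Service class is invented for the example):

    from wrapt import WeakFunctionProxy

    class Service(object):
        def ping(self):
            return 'pong'

    svc = Service()

    # A plain weakref to svc.ping would die immediately, because each
    # attribute access creates a fresh, transient bound method. The proxy
    # instead weakly references svc and the underlying function separately
    # and rebinds them on every call.
    proxy = WeakFunctionProxy(svc.ping)
    assert proxy() == 'pong'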
+ + if instance is None: + return self.__wrapped__(*args, **kwargs) + + return function.__get__(instance, type(instance))(*args, **kwargs) diff --git a/medusa/helpers/utils.py b/medusa/helpers/utils.py index e1fdf240b6..92ffcd1ef4 100644 --- a/medusa/helpers/utils.py +++ b/medusa/helpers/utils.py @@ -26,3 +26,20 @@ def generate(it): def split_and_strip(value, sep=','): """Split a value based on the passed separator, and remove whitespace for each individual value.""" return [_.strip() for _ in value.split(sep) if value != ''] if isinstance(value, string_types) else value + + +def safe_get(dct, keys, default=''): + """ + Iterate over a dict with a tuple of keys to get the last value. + + :param dct: a dictionary + :param keys: a tuple of keys + :param default: default value to return in case of error + :return: value from the last key in the tuple or default + """ + for key in keys: + try: + dct = dct[key] + except KeyError: + return default + return dct diff --git a/medusa/show_queue.py b/medusa/show_queue.py index fa969fc3aa..dd15cfff64 100644 --- a/medusa/show_queue.py +++ b/medusa/show_queue.py @@ -19,7 +19,7 @@ import os import traceback -from imdbpie.exceptions import HTTPError as IMDbHTTPError +from imdbpie.exceptions import ImdbAPIError from medusa import ( app, @@ -531,7 +531,7 @@ def run(self): logger.log(u"Retrieving show info from IMDb", logger.DEBUG) try: self.show.load_imdb_info() - except IMDbHTTPError as e: + except ImdbAPIError as e: logger.log(u"Something wrong on IMDb api: " + e.message, logger.INFO) except Exception as e: logger.log(u"Error loading IMDb info: " + e.message, logger.ERROR) @@ -745,7 +745,7 @@ def run(self): logger.log(u'{id}: Retrieving show info from IMDb'.format(id=self.show.indexerid), logger.DEBUG) try: self.show.load_imdb_info() - except IMDbHTTPError as e: + except ImdbAPIError as e: logger.log(u'{id}: Something wrong on IMDb api: {error_msg}'.format (id=self.show.indexerid, error_msg=e.message), logger.INFO) except Exception as e: @@ -866,7 +866,7 @@ def run(self): logger.log(u'{id}: Retrieving show info from IMDb'.format(id=self.show.indexerid), logger.DEBUG) try: self.show.load_imdb_info() - except IMDbHTTPError as e: + except ImdbAPIError as e: logger.log(u'{id}: Something wrong on IMDb api: {error_msg}'.format (id=self.show.indexerid, error_msg=e.message), logger.INFO) except Exception as e: diff --git a/medusa/tv/series.py b/medusa/tv/series.py index af5c0d41aa..ee1f1ece18 100644 --- a/medusa/tv/series.py +++ b/medusa/tv/series.py @@ -65,6 +65,7 @@ ) from medusa.helper.mappings import NonEmptyDict from medusa.helpers.externals import get_externals, load_externals_from_db +from medusa.helpers.utils import safe_get from medusa.indexers.indexer_api import indexerApi from medusa.indexers.indexer_config import ( INDEXER_TVRAGE, @@ -1536,8 +1537,6 @@ def load_from_indexer(self, tvapi=None): def load_imdb_info(self): """Load all required show information from IMDb with ImdbPie.""" - # TODO: Use new parser or wait upstream API fix - return imdb_api = imdbpie.Imdb() if not self.imdb_id: @@ -1551,11 +1550,12 @@ def load_imdb_info(self): # Make sure we only use the first ID self.imdb_id = self.imdb_id.split(',')[0] + # Set retrieved IMDb ID as imdb_id for externals + self.externals['imdb_id'] = self.imdb_id + log.debug(u'{id}: Loading show info from IMDb with ID: {imdb_id}', {'id': self.indexerid, 'imdb_id': self.imdb_id}) - imdb_obj = imdb_api.get_title_by_id(self.imdb_id) - tmdb_id = self.externals.get('tmdb_id') if tmdb_id: # Country codes and 
countries obtained from TMDB's API. Not IMDb info. @@ -1569,26 +1569,26 @@ def load_imdb_info(self): self.imdb_info['countries'] = self.imdb_info.get('countries', '') self.imdb_info['country_codes'] = self.imdb_info.get('country_codes', '') - # If the show has no year, IMDb returned something we don't want - if not imdb_obj or not imdb_obj.year: - log.debug(u'{id}: IMDb returned none or invalid info for {imdb_id}, skipping update.', + imdb_info = imdb_api.get_title(self.imdb_id) + if not imdb_info: + log.debug(u"{id}: IMDb didn't return any info for {imdb_id}, skipping update.", {'id': self.indexerid, 'imdb_id': self.imdb_id}) return - # Set retrieved IMDb ID as imdb_id for externals - self.externals['imdb_id'] = self.imdb_id + # Additional query needed to get genres + imdb_genres = imdb_api.get_title_genres(self.imdb_id) self.imdb_info.update({ - 'imdb_id': imdb_obj.imdb_id, - 'title': imdb_obj.title, - 'year': imdb_obj.year, + 'imdb_id': self.imdb_id, + 'title': safe_get(imdb_info, ('base', 'title')), + 'year': safe_get(imdb_info, ('base', 'year')), 'akas': '', - 'genres': '|'.join(imdb_obj.genres or ''), - 'rating': str(imdb_obj.rating) if imdb_obj.rating else '', - 'votes': imdb_obj.votes or '', - 'runtimes': int(imdb_obj.runtime / 60) if imdb_obj.runtime else '', # Time is returned in seconds - 'certificates': imdb_obj.certification or '', - 'plot': imdb_obj.plots[0] if imdb_obj.plots else imdb_obj.plot_outline or '', + 'genres': '|'.join(safe_get(imdb_genres, ('genres',))), + 'rating': text_type(safe_get(imdb_info, ('ratings', 'rating'))), + 'votes': safe_get(imdb_info, ('ratings', 'ratingCount')), + 'runtimes': safe_get(imdb_info, ('base', 'runningTimeInMinutes')), + 'certificates': '', + 'plot': safe_get(imdb_info, ('plot', 'outline', 'text')), 'last_update': datetime.date.today().toordinal(), }) diff --git a/requirements.txt b/requirements.txt index 499564b131..a537d57d0b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ git+https://github.com/kurtmckee/feedparser.git@f1dd1bb923ebfe6482fc2521c1f150b4 futures==3.1.1 guessit==2.1.4 html5lib==0.999999999 -imdbpie==4.4.0 +imdbpie==5.2.0 jsonrpclib==0.1.7 knowit==0.2.4 lockfile==0.12.2 From 236f4ef4057aaa3a2800e67883e41044b1c287dc Mon Sep 17 00:00:00 2001 From: Dario Date: Fri, 12 Jan 2018 21:48:51 +0100 Subject: [PATCH 32/35] Add config setting conversion for SUBTITLES_EXTRA and SUBTITLES_PRE_SCRIPTS (#3604) --- medusa/config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/medusa/config.py b/medusa/config.py index 6123b3b63b..f1eff103dd 100644 --- a/medusa/config.py +++ b/medusa/config.py @@ -1194,7 +1194,7 @@ def make_rss_torrent_provider(config): app.REQUIRE_WORDS = convert_csv_string_to_list(self.config_obj['General']['require_words']) app.IGNORED_SUBS_LIST = convert_csv_string_to_list(self.config_obj['General']['ignored_subs_list']) app.BROKEN_PROVIDERS = convert_csv_string_to_list(self.config_obj['General']['broken_providers']) - app.EXTRA_SCRIPTS = convert_csv_string_to_list(self.config_obj['General']['extra_scripts']) + app.EXTRA_SCRIPTS = convert_csv_string_to_list(self.config_obj['General']['extra_scripts'], '|') # Metadata app.METADATA_KODI = convert_csv_string_to_list(self.config_obj['General']['metadata_kodi'], '|') @@ -1209,6 +1209,8 @@ def make_rss_torrent_provider(config): app.SUBTITLES_LANGUAGES = convert_csv_string_to_list(self.config_obj['Subtitles']['subtitles_languages']) app.SUBTITLES_SERVICES_LIST = 
convert_csv_string_to_list(self.config_obj['Subtitles']['SUBTITLES_SERVICES_LIST'])
         app.SUBTITLES_SERVICES_ENABLED = convert_csv_string_to_list(self.config_obj['Subtitles']['SUBTITLES_SERVICES_ENABLED'], '|')
+        app.SUBTITLES_EXTRA_SCRIPTS = convert_csv_string_to_list(self.config_obj['Subtitles']['subtitles_extra_scripts'], '|')
+        app.SUBTITLES_PRE_SCRIPTS = convert_csv_string_to_list(self.config_obj['Subtitles']['subtitles_pre_scripts'], '|')

         # Notifications
         app.KODI_HOST = convert_csv_string_to_list(self.config_obj['KODI']['kodi_host'])

From 1536629b21ac50faf8f60274bace58eb48fd19e5 Mon Sep 17 00:00:00 2001
From: h3llrais3r
Date: Sun, 14 Jan 2018 15:31:05 +0100
Subject: [PATCH 33/35] Don't wrap status column for medium devices and up
 (#3593)

Implements https://github.com/pymedusa/Medusa/issues/3592
---
 static/css/style.css | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/static/css/style.css b/static/css/style.css
index a057f11fa2..51181d4266 100644
--- a/static/css/style.css
+++ b/static/css/style.css
@@ -1709,6 +1709,12 @@ td.col-status {
     text-align: center;
 }

+@media screen and (min-width: 992px) {
+    td.col-status {
+        white-space: nowrap;
+    }
+}
+
 th.col-quality,
 td.col-quality {
     width: 110px;

From c1aefda6c3c56d9a2bbd00202f986c756e928289 Mon Sep 17 00:00:00 2001
From: p0ps
Date: Mon, 15 Jan 2018 16:05:46 +0100
Subject: [PATCH 34/35] Feature/fix recomm lists (#3608)

* Fix simpleanidb/models.py: no longer throw an exception on a corrupted enddate.

* Fix imdb recommended lists after implementing the new API.
---
 lib/simpleanidb/models.py           | 10 ++++++++--
 medusa/show/recommendations/imdb.py | 29 +++++++++++++++--------------
 2 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/lib/simpleanidb/models.py b/lib/simpleanidb/models.py
index 57614a8f08..fc6c12f2da 100644
--- a/lib/simpleanidb/models.py
+++ b/lib/simpleanidb/models.py
@@ -82,9 +82,15 @@ def fill_from_xml(self, xml):  # pylint: disable=too-many-branches
         if xml.find('tags') is not None:
             self.tags = sorted([Tag(self, t) for t in xml.find('tags') if t.text.strip()])
         if xml.find('startdate') is not None:
-            self.start_date = date_to_date(xml.find('startdate').text)
+            try:
+                self.start_date = date_to_date(xml.find('startdate').text)
+            except TypeError:
+                self.start_date = None
         if xml.find('enddate') is not None:
-            self.end_date = date_to_date(xml.find('enddate').text)
+            try:
+                self.end_date = date_to_date(xml.find('enddate').text)
+            except TypeError:
+                self.end_date = None
         if xml.find('description') is not None:
             self.description = xml.find('description').text

diff --git a/medusa/show/recommendations/imdb.py b/medusa/show/recommendations/imdb.py
index 7a256a2915..5c77a755c6 100644
--- a/medusa/show/recommendations/imdb.py
+++ b/medusa/show/recommendations/imdb.py
@@ -70,8 +70,8 @@ def _create_recommended_show(self, show_obj):
             'image_href': show_obj.get('imdb_url')}
         )

-        if show_obj.get('image_url_large'):
-            rec_show.cache_image(show_obj.get('image_url_large'))
+        if show_obj.get('image_url'):
+            rec_show.cache_image(show_obj.get('image_url'))

         return rec_show

@@ -84,31 +84,32 @@ def fetch_popular_shows(self):
         tvdb_mapping_cache.clean()

         imdb_api = imdbpie.Imdb()
-        imdb_result = imdb_api.popular_shows()
+        imdb_result = imdb_api.get_popular_shows()

-        for imdb_show in imdb_result:
+        for imdb_show in imdb_result['ranks']:
             show = dict()
-            imdb_tt = imdb_show['tconst']
+            imdb_tt = imdb_show['id'].strip('/').split('/')[-1]

             if imdb_tt:
-                show['imdb_tt'] = imdb_show['tconst']
+                show['imdb_tt'] = imdb_tt
                 cached_show_details =
imdb_show_details_cache.get(imdb_tt) if not cached_show_details: - show_details = imdb_api.get_title_by_id(imdb_tt) + show_details = imdb_api.get_title(imdb_tt) imdb_show_details_cache.append(imdb_tt, show_details) else: show_details = cached_show_details.value if show_details: - show['year'] = getattr(show_details, 'year') - show['name'] = getattr(show_details, 'title') - show['image_url_large'] = getattr(show_details, 'cover_url') + show['year'] = imdb_show['year'] + show['name'] = imdb_show['title'] + show['image_url_large'] = imdb_show['image']['url'] show['image_path'] = posixpath.join('images', 'imdb_popular', os.path.basename(show['image_url_large'])) - show['imdb_url'] = 'http://www.imdb.com/title/{imdb_tt}'.format(imdb_tt=imdb_tt) - show['votes'] = getattr(show_details, 'votes', 0) - show['outline'] = getattr(show_details, 'plot_outline', 'Not available') - show['rating'] = getattr(show_details, 'rating', 0) + show['image_url'] = '{0}{1}'.format(imdb_show['image']['url'].split('V1')[0], '_SY600_AL_.jpg') + show['imdb_url'] = 'http://www.imdb.com{imdb_id}'.format(imdb_id=imdb_show['id']) + show['votes'] = show_details['ratings']['ratingCount'] + show['outline'] = show_details['plot'].get('outline', {}).get('text') + show['rating'] = show_details['ratings']['rating'] else: continue From e1628534d1756bed5adbe76233673ee321de70c0 Mon Sep 17 00:00:00 2001 From: Dario Date: Mon, 15 Jan 2018 20:18:24 +0100 Subject: [PATCH 35/35] Fix logging in massEditSubmit. Fixes #3160 (#3609) --- medusa/server/web/manage/handler.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/medusa/server/web/manage/handler.py b/medusa/server/web/manage/handler.py index 8d73ad1614..7d30ccaca0 100644 --- a/medusa/server/web/manage/handler.py +++ b/medusa/server/web/manage/handler.py @@ -590,14 +590,14 @@ def massEditSubmit(self, paused=None, default_ep_status=None, dvd_order=None, if not show_obj: continue - cur_root_dir = os.path.dirname(show_obj._location) # pylint: disable=protected-access - cur_show_dir = os.path.basename(show_obj._location) # pylint: disable=protected-access + cur_root_dir = os.path.dirname(show_obj._location) + cur_show_dir = os.path.basename(show_obj._location) if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]: new_show_dir = os.path.join(dir_map[cur_root_dir], cur_show_dir) - logger.log(u'For show {show.name} changing dir from {show.location} to {location}'.format - (show=show_obj, location=new_show_dir)) # pylint: disable=protected-access + logger.log(u'For show {show.name} changing dir from {show._location} to {location}'.format + (show=show_obj, location=new_show_dir)) else: - new_show_dir = show_obj._location # pylint: disable=protected-access + new_show_dir = show_obj._location if paused == 'keep': new_paused = show_obj.paused
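The logging fix above leans on str.format's attribute lookup inside replacement fields; using {show._location} reads the stored attribute directly, presumably to avoid the location property, which can raise when the show directory is missing. A toy illustration of the formatting pattern (the Show class here is invented):

    class Show(object):
        def __init__(self, name, location):
            self.name = name
            self._location = location

    show_obj = Show('Example Show', '/tv/example')
    print(u'For show {show.name} changing dir from {show._location} to {location}'
          .format(show=show_obj, location='/mnt/tv/example'))
    # For show Example Show changing dir from /tv/example to /mnt/tv/example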